Plana-Archive committed on
Commit ed449fb · verified · 1 Parent(s): 11f59ef

Total migration from Library-Anime to Plana-Archive (Maintained Structure)

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. .gitattributes +5 -0
  2. BanG-Dream-MyGO/Dockerfile +54 -0
  3. BanG-Dream-MyGO/Mutsumi-Chan.gif +3 -0
  4. BanG-Dream-MyGO/MyGO.PNG +0 -0
  5. BanG-Dream-MyGO/app.py +635 -0
  6. BanG-Dream-MyGO/config.py +99 -0
  7. BanG-Dream-MyGO/edgetts_db.py +232 -0
  8. BanG-Dream-MyGO/hubert_base.pt +3 -0
  9. BanG-Dream-MyGO/lib/infer_pack/attentions.py +417 -0
  10. BanG-Dream-MyGO/lib/infer_pack/commons.py +166 -0
  11. BanG-Dream-MyGO/lib/infer_pack/models.py +1142 -0
  12. BanG-Dream-MyGO/lib/infer_pack/models_dml.py +1124 -0
  13. BanG-Dream-MyGO/lib/infer_pack/models_onnx.py +819 -0
  14. BanG-Dream-MyGO/lib/infer_pack/modules.py +522 -0
  15. BanG-Dream-MyGO/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +90 -0
  16. BanG-Dream-MyGO/lib/infer_pack/modules/F0Predictor/F0Predictor.py +16 -0
  17. BanG-Dream-MyGO/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +86 -0
  18. BanG-Dream-MyGO/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +97 -0
  19. BanG-Dream-MyGO/lib/infer_pack/modules/F0Predictor/__init__.py +0 -0
  20. BanG-Dream-MyGO/lib/infer_pack/onnx_inference.py +145 -0
  21. BanG-Dream-MyGO/lib/infer_pack/transforms.py +209 -0
  22. BanG-Dream-MyGO/requirements.txt +22 -0
  23. BanG-Dream-MyGO/rmvpe.pt +3 -0
  24. BanG-Dream-MyGO/rmvpe.py +432 -0
  25. BanG-Dream-MyGO/vc_infer_pipeline.py +443 -0
  26. Bocchi-the-Rock/Bocchi Chan.gif +3 -0
  27. Bocchi-the-Rock/Bocchi-the-Rock.PNG +3 -0
  28. Bocchi-the-Rock/Dockerfile +54 -0
  29. Bocchi-the-Rock/Dockerfile.txt +54 -0
  30. Bocchi-the-Rock/app.py +922 -0
  31. Bocchi-the-Rock/config.py +28 -0
  32. Bocchi-the-Rock/edgetts_db.py +232 -0
  33. Bocchi-the-Rock/hubert_base.pt +3 -0
  34. Bocchi-the-Rock/lib/infer_pack/attentions.py +417 -0
  35. Bocchi-the-Rock/lib/infer_pack/commons.py +166 -0
  36. Bocchi-the-Rock/lib/infer_pack/models.py +1142 -0
  37. Bocchi-the-Rock/lib/infer_pack/models_dml.py +1124 -0
  38. Bocchi-the-Rock/lib/infer_pack/models_onnx.py +819 -0
  39. Bocchi-the-Rock/lib/infer_pack/modules.py +522 -0
  40. Bocchi-the-Rock/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +90 -0
  41. Bocchi-the-Rock/lib/infer_pack/modules/F0Predictor/F0Predictor.py +16 -0
  42. Bocchi-the-Rock/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +86 -0
  43. Bocchi-the-Rock/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +97 -0
  44. Bocchi-the-Rock/lib/infer_pack/modules/F0Predictor/__init__.py +0 -0
  45. Bocchi-the-Rock/lib/infer_pack/onnx_inference.py +145 -0
  46. Bocchi-the-Rock/lib/infer_pack/transforms.py +209 -0
  47. Bocchi-the-Rock/requirements.txt +24 -0
  48. Bocchi-the-Rock/rmvpe.pt +3 -0
  49. Bocchi-the-Rock/rmvpe.py +432 -0
  50. Bocchi-the-Rock/vc_infer_pipeline.py +443 -0
.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+BanG-Dream-MyGO/Mutsumi-Chan.gif filter=lfs diff=lfs merge=lfs -text
+Bocchi-the-Rock/Bocchi[[:space:]]Chan.gif filter=lfs diff=lfs merge=lfs -text
+Bocchi-the-Rock/Bocchi-the-Rock.PNG filter=lfs diff=lfs merge=lfs -text
+DATE-A-LIVE/kurumi-tokisaki.gif filter=lfs diff=lfs merge=lfs -text
+Waifu-Anime-RCV/soyo-nagasaki.gif filter=lfs diff=lfs merge=lfs -text
BanG-Dream-MyGO/Dockerfile ADDED
@@ -0,0 +1,54 @@
+# Use a base image pinned by digest so it matches the Hugging Face cache
+FROM python:3.10@sha256:875c3591e586f66aa65621926230925144920c951902a6c2eef005d9783a7ca7
+
+# Start as root for the initial installs
+USER root
+
+# Install fakeroot, wrap apt-get with it, then create the UID 1000 user
+RUN apt-get update && apt-get install -y fakeroot && \
+    mv /usr/bin/apt-get /usr/bin/.apt-get && \
+    echo '#!/usr/bin/env sh\nfakeroot /usr/bin/.apt-get "$@"' > /usr/bin/apt-get && \
+    chmod +x /usr/bin/apt-get && \
+    rm -rf /var/lib/apt/lists/* && \
+    useradd -m -u 1000 user
+
+# Install common dependencies for ML / Gradio / media processing
+RUN apt-get update && apt-get install -y \
+    git \
+    git-lfs \
+    ffmpeg \
+    libsm6 \
+    libxext6 \
+    libgl1-mesa-glx \
+    cmake \
+    rsync \
+    && rm -rf /var/lib/apt/lists/* && \
+    git lfs install
+
+# Switch to the standard Hugging Face user (UID 1000)
+USER user
+ENV HOME=/home/user \
+    PATH=$HOME/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+# Explicitly install pip 24.0
+RUN pip install --no-cache-dir pip==24.0
+
+# Copy requirements.txt to a temporary location
+COPY --chown=1000:1000 requirements.txt /tmp/pre-requirements.txt
+
+# Install the project's Python dependencies
+RUN pip install --no-cache-dir -r /tmp/pre-requirements.txt
+
+# Copy the entire project code
+COPY --link --chown=1000:1000 . .
+
+# Write all dependencies to a freeze file (for the HF cache)
+RUN pip freeze > /tmp/freeze.txt
+
+# Expose the default Gradio / FastAPI port
+EXPOSE 7860
+
+# Run the Python app
+CMD ["python3", "app.py", "--api"]
BanG-Dream-MyGO/Mutsumi-Chan.gif ADDED

Git LFS Details

  • SHA256: 63b6e495cf655cdc86be72163d6161e962a541d05bb6a95a274bbed481388838
  • Pointer size: 132 Bytes
  • Size of remote file: 1.23 MB
BanG-Dream-MyGO/MyGO.PNG ADDED
BanG-Dream-MyGO/app.py ADDED
@@ -0,0 +1,635 @@
+import os
+import json
+import traceback
+import logging
+import gradio as gr
+import numpy as np
+import librosa
+import torch
+import asyncio
+import edge_tts
+import re
+import shutil
+import time
+from datetime import datetime
+from fairseq import checkpoint_utils
+from fairseq.data.dictionary import Dictionary
+from lib.infer_pack.models import (
+    SynthesizerTrnMs256NSFsid,
+    SynthesizerTrnMs256NSFsid_nono,
+    SynthesizerTrnMs768NSFsid,
+    SynthesizerTrnMs768NSFsid_nono,
+)
+from vc_infer_pipeline import VC
+from config import Config
+
+# =============================
+# LOAD ENVIRONMENT VARIABLES (without dotenv)
+# =============================
+HF_TOKEN = os.getenv("HF_TOKEN")
+if HF_TOKEN:
+    print("🔑 Hugging Face token detected")
+    os.environ["HUGGINGFACE_TOKEN"] = HF_TOKEN
+else:
+    print("⚠️ No HF_TOKEN found")
+
+# =============================
+# AUTOMATIC DOWNLOAD FROM THE MODEL REPO
+# =============================
+if not os.path.exists("weights"):
+    print("=" * 50)
+    print("🚀 BANG DREAM MYGO VOICE CONVERSION")
+    print("=" * 50)
+    print("📥 Downloading weights and model assets from the Plana-RCV/BanGDream-MyGO repo...")
+
+    try:
+        from huggingface_hub import snapshot_download
+
+        repo_id = "Plana-Archive/Premium-Model"
+        print(f"📥 Downloading from: {repo_id}")
+        print("📁 Looking for: BanGDream-MyGO")
+
+        # Download with patterns specific to BanG Dream MyGO
+        downloaded_path = snapshot_download(
+            repo_id=repo_id,
+            allow_patterns=[
+                "BanGDream-MyGO/weights/**",
+                "BanGDream-MyGO/hubert_base.pt",
+                "BanGDream-MyGO/rmvpe.pt"
+            ],
+            local_dir=".",
+            local_dir_use_symlinks=False,
+            token=HF_TOKEN if HF_TOKEN else None,
+            max_workers=2
+        )
+
+        print("✅ Download completed")
+
+        # Move the files
+        source_dir = "BanGDream-MyGO"
+
+        if os.path.exists(source_dir):
+            print(f"📂 Moving files from: {source_dir}")
+
+            # Move all contents
+            for item in os.listdir(source_dir):
+                s = os.path.join(source_dir, item)
+                d = os.path.join(".", item)
+                if os.path.isdir(s):
+                    if os.path.exists(d):
+                        shutil.rmtree(d)
+                    shutil.move(s, d)
+                else:
+                    shutil.move(s, d)
+
+            # Remove the source folder
+            if os.path.exists(source_dir):
+                shutil.rmtree(source_dir)
+
+            print("✅ Files moved successfully")
+
+            # Create folder_info.json if it does not exist
+            folder_info_path = os.path.join("weights", "folder_info.json")
+            if not os.path.exists(folder_info_path):
+                folder_info = {
+                    "BanGDream-MyGO": {
+                        "title": "BanG Dream! MyGO!!!!!",
+                        "folder_path": "BanGDream-MyGO",
+                        "description": "Official RVC Weights for BanG Dream! MyGO!!!!! characters",
+                        "enable": True
+                    }
+                }
+                with open(folder_info_path, "w", encoding="utf-8") as f:
+                    json.dump(folder_info, f, indent=2, ensure_ascii=False)
+                print("📄 Created folder_info.json")
+
+        else:
+            print("❌ Source directory not found after download!")
+
+    except Exception as e:
+        print(f"⚠️ Download failed: {str(e)}")
+        traceback.print_exc()
+        print("\n📝 Manual setup:")
+        print("1. Create folder: weights/")
+        print("2. Download from: https://huggingface.co/Library-Anime/Plana-RCV/tree/main/BanGDream-MyGO")
+        print("3. Put BanGDream-MyGO folder in weights/")
+
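Note: the per-item move loop above could be written more compactly on Python 3.8+, where shutil.copytree can merge into an existing directory. An illustrative alternative, not what this commit ships:

import shutil

# Merge the downloaded folder into the working directory, then drop it
# (requires Python >= 3.8 for dirs_exist_ok).
shutil.copytree("BanGDream-MyGO", ".", dirs_exist_ok=True)
shutil.rmtree("BanGDream-MyGO")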
+# Initialize configuration
+config = Config()
+logging.getLogger("numba").setLevel(logging.WARNING)
+logging.getLogger("fairseq").setLevel(logging.WARNING)
+
+# Model cache
+model_cache = {}
+hubert_loaded = False
+hubert_model = None
+
+spaces = True
+if spaces:
+    audio_mode = ["Upload audio", "TTS Audio"]
+else:
+    audio_mode = ["Input path", "Upload audio", "TTS Audio"]
+
+f0method_mode = ["pm", "harvest"]
+if os.path.isfile("rmvpe.pt"):
+    f0method_mode.insert(2, "rmvpe")
+
+def clean_title(title):
+    title = re.sub(r'^BanG Dream[!]?\s*MyGO[!]*\s*-\s*', '', title, flags=re.IGNORECASE)
+    return re.sub(r'\s*-\s*\d+\s*epochs', '', title, flags=re.IGNORECASE)
+
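For illustration, here is clean_title applied to a hypothetical weight title, where both substitutions fire:

# Hypothetical title; the franchise prefix and the epoch suffix are stripped.
clean_title("BanG Dream! MyGO!!!!! - Tomori - 500 epochs")  # -> "Tomori"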
+# OPTIMIZATION: faster audio processing
+def _load_audio_input(vc_audio_mode, vc_input, vc_upload, tts_text, spaces_limit=20):
+    temp_file = None
+    try:
+        if vc_audio_mode == "Input path" and vc_input:
+            # Use librosa for loading
+            audio, sr = librosa.load(vc_input, sr=16000, mono=True)
+            return audio.astype(np.float32), 16000, None
+
+        elif vc_audio_mode == "Upload audio":
+            if vc_upload is None:
+                raise ValueError("Please upload an audio file first!")
+            sampling_rate, audio = vc_upload
+
+            # Convert to float32
+            if audio.dtype != np.float32:
+                audio = audio.astype(np.float32) / np.iinfo(audio.dtype).max
+
+            if len(audio.shape) > 1:
+                # gradio returns (samples, channels); average channels to mono
+                audio = np.mean(audio, axis=1)
+
+            if sampling_rate != 16000:
+                audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000, res_type='kaiser_fast')
+
+            return audio.astype(np.float32), 16000, None
+
+        elif vc_audio_mode == "TTS Audio":
+            if not tts_text or tts_text.strip() == "":
+                raise ValueError("Please enter text for TTS!")
+
+            temp_file = "tts_temp.wav"
+            # Async TTS with a timeout
+            async def tts_task():
+                return await edge_tts.Communicate(tts_text, "ja-JP-NanamiNeural").save(temp_file)
+
+            # Run with a timeout
+            try:
+                asyncio.run(asyncio.wait_for(tts_task(), timeout=10))
+            except asyncio.TimeoutError:
+                raise ValueError("TTS timed out! Please try again.")
+
+            audio, sr = librosa.load(temp_file, sr=16000, mono=True)
+            return audio.astype(np.float32), 16000, temp_file
+
+    except Exception as e:
+        if temp_file and os.path.exists(temp_file):
+            os.remove(temp_file)
+        raise e
+
+    raise ValueError("Invalid audio mode or missing input.")
+
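The integer-to-float conversion in the upload branch divides by the dtype's maximum, mapping PCM samples into roughly [-1.0, 1.0]. A quick check of that arithmetic:

import numpy as np

pcm = np.array([0, 16384, 32767], dtype=np.int16)
print(pcm.astype(np.float32) / np.iinfo(pcm.dtype).max)
# [0.        0.5000153 1.       ]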
+def adjust_audio_speed(audio, speed):
+    if speed == 1.0:
+        return audio
+    # Use a faster method for time stretching
+    return librosa.effects.time_stretch(audio.astype(np.float32), rate=speed)
+
+# OPTIMIZATION: more efficient audio preprocessing
+def preprocess_audio(audio):
+    # Normalize audio
+    if np.max(np.abs(audio)) > 1.0:
+        audio = audio / np.max(np.abs(audio)) * 0.9
+    return audio.astype(np.float32)
+
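Note the librosa convention adjust_audio_speed relies on: rate > 1 shortens the signal (faster speech), rate < 1 lengthens it, and pitch is preserved either way. For example:

import numpy as np
import librosa

y = np.zeros(16000, dtype=np.float32)  # 1 second at 16 kHz
print(len(librosa.effects.time_stretch(y, rate=2.0)))  # ~8000 samples, half the duration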
+# OPTIMIZATION: faster inference pipeline
+def create_vc_fn(model_key, tgt_sr, net_g, vc, if_f0, version, file_index):
+    def vc_fn(
+        vc_audio_mode, vc_input, vc_upload, tts_text,
+        f0_up_key, f0_method, index_rate, filter_radius,
+        resample_sr, rms_mix_rate, protect, speed,
+    ):
+        temp_audio_file = None
+        try:
+            # Clear the GPU cache before starting
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+
+            # Preload the model onto the GPU
+            net_g.to(config.device)
+
+            yield "Status: 🚀 Processing audio...", None
+
+            # Load audio with optimizations
+            audio, sr, temp_audio_file = _load_audio_input(vc_audio_mode, vc_input, vc_upload, tts_text)
+
+            # Preprocess audio
+            audio = preprocess_audio(audio)
+
+            # Convert to a tensor with memory optimization
+            audio_tensor = torch.FloatTensor(audio).to(config.device)
+
+            times = [0, 0, 0]
+
+            # OPTIMIZATION: chunked processing for long audio
+            max_chunk_size = 16000 * 30  # 30 seconds per chunk
+            if len(audio) > max_chunk_size:
+                chunks = []
+                for i in range(0, len(audio), max_chunk_size):
+                    chunk = audio[i:i + max_chunk_size]
+                    chunk_tensor = torch.FloatTensor(chunk).to(config.device)
+
+                    chunk_opt = vc.pipeline(
+                        hubert_model, net_g, 0, chunk_tensor,
+                        "chunk" if vc_input else "temp", times,
+                        int(f0_up_key), f0_method, file_index, index_rate,
+                        if_f0, filter_radius, tgt_sr, resample_sr,
+                        rms_mix_rate, version, protect, f0_file=None,
+                    )
+                    chunks.append(chunk_opt)
+
+                audio_opt = np.concatenate(chunks)
+            else:
+                # Process as a single chunk
+                audio_opt = vc.pipeline(
+                    hubert_model, net_g, 0, audio_tensor,
+                    vc_input if vc_input else "temp", times,
+                    int(f0_up_key), f0_method, file_index, index_rate,
+                    if_f0, filter_radius, tgt_sr, resample_sr,
+                    rms_mix_rate, version, protect, f0_file=None,
+                )
+
+            # Ensure audio_opt is float32
+            audio_opt = audio_opt.astype(np.float32)
+
+            # Apply speed adjustment
+            if speed != 1.0:
+                audio_opt = adjust_audio_speed(audio_opt, speed)
+
+            # Normalize the output and keep it float32
+            if np.max(np.abs(audio_opt)) > 0:
+                audio_opt = (audio_opt / np.max(np.abs(audio_opt)) * 0.9).astype(np.float32)
+
+            # Return in the format expected by gradio.Audio
+            yield "Status: ✅ Done!", (tgt_sr, audio_opt)
+
+        except Exception as e:
+            yield f"❌ Error: {str(e)}\n\n{traceback.format_exc()}", None
+        finally:
+            # Cleanup
+            if temp_audio_file and os.path.exists(temp_audio_file):
+                os.remove(temp_audio_file)
+
+            # Empty the GPU cache
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+
+            # Move the model back to the CPU to save memory (unless it is cached)
+            if model_key not in model_cache:
+                net_g.to('cpu')
+
+    return vc_fn
+
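Because vc_fn yields instead of returning, Gradio streams each yielded tuple to the output components, which is how the status textbox updates before the result audio is ready. A minimal sketch of the same pattern (generator handlers need the queue enabled, as this app does with app.queue()):

import time
import gradio as gr

def slow_task():
    yield "Working...", None       # first update: status only
    time.sleep(1)
    yield "Done!", "final result"  # second update: status + result

with gr.Blocks() as demo:
    status, result = gr.Textbox(), gr.Textbox()
    gr.Button("Run").click(slow_task, outputs=[status, result])
demo.queue().launch()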
+def create_model_info_from_files(base_path):
+    """Build model_info.json from the files that actually exist for BanG Dream MyGO"""
+    mygo_dir = os.path.join(base_path, "BanGDream-MyGO")
+    if not os.path.exists(mygo_dir):
+        return
+
+    model_info_path = os.path.join(mygo_dir, "model_info.json")
+
+    # Scan every character from the subfolders
+    model_info = {}
+
+    # Find all character folders
+    for char_folder in os.listdir(mygo_dir):
+        char_path = os.path.join(mygo_dir, char_folder)
+        if not os.path.isdir(char_path):
+            continue
+
+        # Find the files inside the character folder
+        pth_files = [f for f in os.listdir(char_path) if f.endswith('.pth')]
+        index_files = [f for f in os.listdir(char_path) if f.endswith('.index')]
+        image_files = [f for f in os.listdir(char_path) if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
+
+        if not pth_files:
+            continue
+
+        # Format the character name for the title
+        char_name_formatted = re.sub(r"([a-z])([A-Z])", r"\1 \2", char_folder)
+
+        model_info[char_folder] = {
+            "enable": True,
+            "model_path": pth_files[0],
+            "title": f"MyGO - {char_name_formatted}",
+            "cover": image_files[0] if image_files else "cover.png",
+            "feature_retrieval_library": index_files[0] if index_files else "",
+            "author": "Plana-Archive"
+        }
+
+    with open(model_info_path, "w", encoding="utf-8") as f:
+        json.dump(model_info, f, indent=2, ensure_ascii=False)
+
+    print(f"✅ Created model_info.json with {len(model_info)} characters")
+    return model_info
+
+def load_model():
+    categories = []
+    base_path = "weights"
+
+    if not os.path.exists(base_path):
+        print(f"❌ Folder '{base_path}' not found!")
+        return categories
+
+    # Read folder_info.json, or create a default one
+    folder_info_path = f"{base_path}/folder_info.json"
+    if not os.path.isfile(folder_info_path):
+        print("📄 Creating default folder_info.json...")
+        folder_info = {
+            "BanGDream-MyGO": {
+                "title": "BanG Dream! MyGO!!!!!",
+                "folder_path": "BanGDream-MyGO",
+                "description": "Official RVC Weights for BanG Dream! MyGO!!!!! characters",
+                "enable": True
+            }
+        }
+
+        with open(folder_info_path, "w", encoding="utf-8") as f:
+            json.dump(folder_info, f, indent=2, ensure_ascii=False)
+
+    with open(folder_info_path, "r", encoding="utf-8") as f:
+        folder_info = json.load(f)
+
+    for category_name, category_info in folder_info.items():
+        if not category_info.get('enable', True):
+            continue
+
+        category_title, category_folder, description = (
+            category_info['title'],
+            category_info['folder_path'],
+            category_info['description']
+        )
+
+        models = []
+        model_info_path = f"{base_path}/{category_folder}/model_info.json"
+
+        # If model_info.json is missing, build it from the existing files
+        if not os.path.exists(model_info_path):
+            print("  ⚠️ model_info.json not found, creating from files...")
+            model_info = create_model_info_from_files(base_path)
+            if not model_info:
+                continue
+
+        if os.path.exists(model_info_path):
+            with open(model_info_path, "r", encoding="utf-8") as f:
+                models_info = json.load(f)
+
+            for character_name, info in models_info.items():
+                if not info.get('enable', True):
+                    continue
+
+                model_title, model_name, model_author = (
+                    info['title'],
+                    info['model_path'],
+                    info.get("author")
+                )
+
+                # Build a unique cache key
+                cache_key = f"{category_folder}_{character_name}"
+
+                # Use the cache when available
+                if cache_key in model_cache:
+                    tgt_sr, net_g, vc, if_f0, version, model_index = model_cache[cache_key]
+                else:
+                    model_cover = f"{base_path}/{category_folder}/{character_name}/{info['cover']}"
+                    model_index = f"{base_path}/{category_folder}/{character_name}/{info['feature_retrieval_library']}"
+
+                    # Load model weights
+                    model_path = f"{base_path}/{category_folder}/{character_name}/{model_name}"
+                    cpt = torch.load(model_path, map_location="cpu")
+
+                    tgt_sr = cpt["config"][-1]
+                    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
+                    if_f0, version = cpt.get("f0", 1), cpt.get("version", "v1")
+
+                    # Initialize the model
+                    if version == "v1":
+                        if if_f0 == 1:
+                            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+                        else:
+                            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+                    else:
+                        if if_f0 == 1:
+                            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+                        else:
+                            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+
+                    # Load weights
+                    if hasattr(net_g, "enc_q"):
+                        del net_g.enc_q
+                    net_g.load_state_dict(cpt["weight"], strict=False)
+                    net_g.eval().to('cpu')  # Keep it on the CPU for now
+
+                    # Create the VC instance
+                    vc = VC(tgt_sr, config)
+
+                    # Cache the model
+                    model_cache[cache_key] = (tgt_sr, net_g, vc, if_f0, version, model_index)
+
+                models.append((
+                    character_name, model_title, model_author,
+                    f"{base_path}/{category_folder}/{character_name}/{info['cover']}",
+                    version,
+                    create_vc_fn(cache_key, tgt_sr, net_g, vc, if_f0, version, model_index)
+                ))
+
+        categories.append([category_title, category_folder, description, models])
+
+    return categories
+
+def load_hubert():
+    global hubert_model, hubert_loaded
+    if hubert_loaded:
+        return
+
+    torch.serialization.add_safe_globals([Dictionary])
+    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
+        ["hubert_base.pt"],
+        suffix="",
+    )
+    hubert_model = models[0].to(config.device)
+    hubert_model = hubert_model.half() if config.is_half else hubert_model.float()
+    hubert_model.eval()
+    hubert_loaded = True
+
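The add_safe_globals call exists because fairseq checkpoints pickle a Dictionary object, and recent PyTorch (2.6+) defaults torch.load to weights_only=True, which rejects classes that are not allow-listed. The API itself only appeared in PyTorch 2.4, so a version-tolerant variant would guard it — a sketch, reusing the Dictionary import from the top of this file:

import torch
from fairseq.data.dictionary import Dictionary

# add_safe_globals only exists on PyTorch >= 2.4; skip it on older versions.
if hasattr(torch.serialization, "add_safe_globals"):
    torch.serialization.add_safe_globals([Dictionary])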
+def change_audio_mode(vc_audio_mode):
+    is_input_path = vc_audio_mode == "Input path"
+    is_upload = vc_audio_mode == "Upload audio"
+    is_tts = vc_audio_mode == "TTS Audio"
+
+    return (
+        gr.Textbox.update(visible=is_input_path),
+        gr.Checkbox.update(visible=is_upload),
+        gr.Audio.update(visible=is_upload),
+        gr.Textbox.update(visible=is_tts, lines=4 if is_tts else 2)
+    )
+
+def use_microphone(microphone):
+    return gr.Audio.update(source="microphone" if microphone else "upload")
+
+# CSS from app (1).py
+css = """
+@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=Quicksand:wght@400;600;700&display=swap');
+body, .gradio-container { background-color: #ffffff !important; font-family: 'Inter', sans-serif !important; }
+footer { display: none !important; }
+.arona-loading-container { display: flex; align-items: center; justify-content: center; gap: 15px; margin-top: 15px; padding: 10px; }
+.loading-text-blue { font-family: 'Quicksand', sans-serif; font-size: 20px; font-weight: 700; color: #00b0ff; letter-spacing: 1px; }
+.loading-gif-small { width: 100px; height: auto; border-radius: 8px; }
+.header-img-container { text-align: center; padding: 10px 0; background: #ffffff !important; }
+.header-img { width: 100%; max-width: 500px; border-radius: 15px; margin: 0 auto; display: block; }
+.status-card { background: #ffffff; border: 1px solid #e1f0ff; border-radius: 14px; padding: 15px 10px; margin: 0 auto 15px auto; max-width: 400px; display: flex; flex-direction: column; align-items: center; }
+.status-online-box { display: flex; align-items: center; gap: 8px; margin-bottom: 12px; }
+.status-details-container { display: flex; width: 100%; justify-content: center; align-items: center; border-top: 1px solid #f0f7ff; padding-top: 10px; }
+.status-detail-item { flex: 1; display: flex; flex-direction: column; align-items: center; text-align: center; }
+.status-detail-item:first-child { border-right: 1px solid #e1f0ff; }
+.status-text-main { font-size: 13px !important; font-weight: 600; color: #546e7a; }
+.status-text-sub { font-size: 11px !important; color: #90a4ae; }
+.dot-online { height: 8px; width: 8px; background-color: #2ecc71; border-radius: 50%; display: inline-block; animation: blink-green 1.5s infinite; }
+@keyframes blink-green { 0% { opacity: 1; } 50% { opacity: 0.4; } 100% { opacity: 1; } }
+.gr-form .gr-block label span, .gr-box label span, .gr-panel label span { background: linear-gradient(135deg, #4fc3f7 0%, #00b0ff 100%) !important; color: white !important; padding: 4px 12px !important; border-radius: 8px !important; font-weight: 600 !important; box-shadow: 0 0 15px rgba(79, 195, 247, 0.4) !important; }
+input[type="range"] { accent-color: #00b0ff !important; }
+.char-scroll-box { display: grid !important; grid-template-columns: repeat(2, 1fr) !important; gap: 12px !important; max-height: 280px; overflow-y: auto; padding: 15px; background: #ffffff; border: 2px solid #eef5ff; border-radius: 14px; }
+.char-card { background: white; padding: 12px; border-radius: 12px; cursor: pointer; border: 1px solid #e1f5fe; border-left: 5px solid #4fc3f7; transition: all 0.2s ease; display: flex; flex-direction: column; height: 65px; }
+.char-name-jp { font-weight: 700; font-size: 11px !important; color: #455a64; }
+.char-name-en { font-size: 8.5px !important; color: #90a4ae; text-transform: uppercase; }
+.speed-section { margin-top: 20px; padding: 18px; border-radius: 20px; background: linear-gradient(135deg, #f0f7ff 0%, #ffffff 100%); border: 2px solid #e1f0ff; }
+.speed-title { font-family: 'Quicksand', sans-serif; font-weight: 700; color: #4ea8de; text-align: center; margin-bottom: 12px; font-size: 14px; }
+.generate-btn { font-family: 'Quicksand', sans-serif; font-weight: 700 !important; background: linear-gradient(135deg, #64b5f6 0%, #2196f3 100%) !important; color: white !important; border-radius: 12px !important; }
+.footer-text { text-align: center; padding: 20px; border-top: 1px solid #f0f4f8; color: #b0bec5; font-size: 11px; }
+.speed-notes-box { font-family: 'Arial'; border: 1px solid #ffd8b2; border-radius: 8px; padding: 12px; background: #fff7ed; border-left: 4px solid #fb923c; margin-top: 10px; }
+.speed-notes-title { color: #c2410c; font-size: 12px; margin: 0 0 5px 0; font-weight: bold; }
+.speed-notes-content { color: #9a3412; font-size: 11px; margin: 0; }
+.video-demo-container { text-align: center; padding: 20px; background: #ffffff; border-radius: 20px; border: 2px solid #e1f0ff; margin: 20px auto; max-width: 800px; }
+.video-demo-title { font-family: 'Quicksand', sans-serif; font-weight: 700; color: #4fc3f7; font-size: 18px; margin-bottom: 15px; }
+.video-demo-player { width: 100%; border-radius: 15px; box-shadow: 0 10px 30px rgba(0, 176, 255, 0.2); }
+"""
+
+if __name__ == '__main__':
+    # Preload the hubert model
+    load_hubert()
+
+    # Load models with caching
+    categories = load_model()
+    total_models = sum(len(models) for _, _, _, models in categories)
+
+    # Gradio optimization with a more efficient queue
+    with gr.Blocks(css=css, theme=gr.themes.Soft()) as app:
+        # Header image for BanG Dream MyGO
+        gr.HTML('<div class="header-img-container"><img src="https://huggingface.co/spaces/Library-Anime/BanG-Dream-MyGO/resolve/main/MyGO.PNG" class="header-img"></div>')
+
+        gr.HTML(f'''
+        <div class="status-card">
+            <div class="status-online-box"><span class="dot-online"></span><b style="color: #4fc3f7; font-size: 14px;">System Online</b></div>
+            <div class="status-details-container">
+                <div class="status-detail-item"><span class="status-text-main">🎸 {total_models} Members</span><span class="status-text-sub">Ready</span></div>
+                <div class="status-detail-item"><span class="status-text-main">📊 Total</span><span class="status-text-sub">Database: {total_models}</span></div>
+            </div>
+        </div>
+        ''')
+
+        # VIDEO DEMO
+        with gr.Row():
+            with gr.Column(scale=1):
+                pass
+            with gr.Column(scale=3):
+                gr.HTML("""
+                <div class="video-demo-container">
+                    <div class="video-demo-title">✅ [ON] MODE YURI 💚</div>
+                    <video class="video-demo-player" controls autoplay loop muted playsinline>
+                        <source src="https://huggingface.co/spaces/BanG-Dream-MyGO/README/resolve/main/Sakiko Chan.mp4" type="video/mp4">
+                        Your browser does not support the video tag.
+                    </video>
+                </div>
+                """)
+            with gr.Column(scale=1):
+                pass
+
+        for cat_idx, (folder_title, folder, description, models) in enumerate(categories):
+            with gr.TabItem(folder_title):
+                with gr.Accordion("📑 Select Member", open=True):
+                    char_html = "".join([f'<div class="char-card" onclick="selectModel(\'{folder_title}\', \'{name}\')"><span class="char-name-jp">{clean_title(title)}</span><span class="char-name-en">{name}</span></div>' for name, title, author, cover, version, vc_fn in models])
+                    gr.HTML(f'<div class="char-scroll-box">{char_html}</div>')
+
+                with gr.Tabs():
+                    for model_idx, (name, title, author, cover, model_version, vc_fn) in enumerate(models):
+                        with gr.TabItem(name, id=f"model_{cat_idx}_{model_idx}"):
+                            with gr.Row():
+                                with gr.Column(scale=1):
+                                    gr.HTML(f'<div style="display:flex;flex-direction:column;align-items:center;padding:20px;background:white;border-radius:20px;border:1px solid #eef5ff;"><img style="width:200px;height:260px;object-fit:cover;border-radius:15px;" src="file/{cover}"><div style="font-family:\'Quicksand\',sans-serif;font-weight:700;font-size:18px;color:#039be5;margin-top:15px;">{clean_title(title)}</div><div style="font-size:11px;color:#b0bec5;margin-top:5px;">{model_version} • {author}</div></div>')
+
+                                with gr.Column(scale=2):
+                                    with gr.Group():
+                                        vc_audio_mode = gr.Dropdown(label="Input Mode", choices=audio_mode, value="TTS Audio")
+                                        vc_input = gr.Textbox(visible=False)
+                                        vc_microphone_mode = gr.Checkbox(label="Use Microphone", value=False)
+                                        vc_upload = gr.Audio(label="Upload Audio Source", source="upload", visible=False)
+                                        tts_text = gr.Textbox(label="TTS Text", visible=True, placeholder="Type message here...", lines=3)
+
+                                        with gr.Row():
+                                            with gr.Column():
+                                                vc_transform0 = gr.Slider(minimum=-12, maximum=12, label="Pitch", value=12, step=1)
+                                                f0method0 = gr.Radio(label="Conversion Algorithm", choices=f0method_mode, value="rmvpe")
+                                            with gr.Column():
+                                                with gr.Accordion("⚙️ SETTINGS ⚙️", open=False):
+                                                    index_rate1 = gr.Slider(0, 1, label="Index Rate", value=0.75)
+                                                    filter_radius0 = gr.Slider(0, 7, label="Filter", value=7, step=1)
+                                                    resample_sr0 = gr.Slider(0, 48000, label="Resample", value=0)
+                                                    rms_mix_rate0 = gr.Slider(0, 1, label="Volume Mix", value=0.76)
+                                                    protect0 = gr.Slider(0, 0.5, label="Voice Protect", value=0.33)
+
+                                        # NOTES & SUGGESTIONS BOX - REMOVED
+
+                                        with gr.Column(elem_classes="speed-section"):
+                                            gr.HTML('<div class="speed-title">⚡ VOICE SPEED ⚡</div>')
+                                            speed_slider = gr.Slider(0.5, 2.0, value=1.0, step=0.1, label=None)
+
+                                            # Notes specific to the speed slider
+                                            gr.HTML("""<div class="speed-notes-box">
+                                            <div class="speed-notes-title">🌥️ QUICK NOTES 🌥️</div>
+                                            <div class="speed-notes-content">
+                                            • <b>Left (0.5):</b> slows the voice down<br>
+                                            • <b>✅ Middle (1.0):</b> normal speed (recommended)<br>
+                                            • <b>🚫 Right (2.0):</b> speeds the voice up (not recommended)<br><br>
+                                            <b>Tip:</b> Move the slider left for slower speech and right for faster speech. Keeping it at 1.0 is recommended for normal results, or try 0.8 or 0.9.
+                                            </div>
+                                            </div>""")
+
+                                        gr.HTML('<div class="arona-loading-container"><div class="loading-text-blue">Let\'s Play Music!</div><img class="loading-gif-small" src="https://huggingface.co/spaces/Library-Anime/BanG-Dream-MyGO/resolve/main/Mutsumi-Chan.gif"></div>')
+
+                                with gr.Column(scale=1):
+                                    vc_log = gr.Textbox(label="Process Logs", interactive=False)
+                                    vc_output = gr.Audio(label="Result Audio", interactive=False)
+                                    vc_convert = gr.Button("🎸 GENERATE VOICE 🎸", variant="primary", elem_classes="generate-btn")
+
+                            vc_convert.click(
+                                fn=vc_fn,
+                                inputs=[vc_audio_mode, vc_input, vc_upload, tts_text, vc_transform0, f0method0,
+                                        index_rate1, filter_radius0, resample_sr0, rms_mix_rate0, protect0, speed_slider],
+                                outputs=[vc_log, vc_output]
+                            )
+                            vc_audio_mode.change(fn=change_audio_mode, inputs=[vc_audio_mode], outputs=[vc_input, vc_microphone_mode, vc_upload, tts_text])
+                            vc_microphone_mode.change(fn=use_microphone, inputs=vc_microphone_mode, outputs=vc_upload)
+
+        gr.HTML('<div class="footer-text"><div>DESIGNED BY 🎸 Mutsumi Chan 🎸</div><div style="font-weight:700; color:#90a4ae;">BanG Dream! MyGO!!!!! Voice Conversion • 2024</div></div>')
+        app.load(None, None, None, js="""() => { window.selectModel = (cat, mod) => { const tabs = document.querySelectorAll('.tabs .tab-nav button'); for (let t of tabs) { if (t.textContent.trim() === cat) { t.click(); setTimeout(() => { const mTabs = document.querySelectorAll('.tabs .tab-nav button'); for (let mt of mTabs) { if (mt.textContent.trim() === mod) mt.click(); } }, 50); break; } } } }""")
+
+    # FIXED: adjusted for this Gradio version
+    app.queue(
+        max_size=3
+    ).launch(
+        share=False,
+        server_name="0.0.0.0" if os.getenv('SPACE_ID') else "127.0.0.1",
+        server_port=7860,
+        quiet=True,
+        show_error=True
+    )
BanG-Dream-MyGO/config.py ADDED
@@ -0,0 +1,99 @@
+import argparse
+import sys
+import torch
+from multiprocessing import cpu_count
+
+class Config:
+    def __init__(self):
+        self.device = "cuda:0"
+        self.is_half = True
+        self.n_cpu = 0
+        self.gpu_name = None
+        self.gpu_mem = None
+        (
+            self.colab,
+            self.api,
+            self.unsupported
+        ) = self.arg_parse()
+        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
+
+    @staticmethod
+    def arg_parse() -> tuple:
+        parser = argparse.ArgumentParser()
+        parser.add_argument("--colab", action="store_true", help="Launch in colab")
+        parser.add_argument("--api", action="store_true", help="Launch with api")
+        parser.add_argument("--unsupported", action="store_true", help="Enable unsupported feature")
+        cmd_opts = parser.parse_args()
+
+        return (
+            cmd_opts.colab,
+            cmd_opts.api,
+            cmd_opts.unsupported
+        )
+
+    # has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
+    # check `getattr` and try it for compatibility
+    @staticmethod
+    def has_mps() -> bool:
+        if not torch.backends.mps.is_available():
+            return False
+        try:
+            torch.zeros(1).to(torch.device("mps"))
+            return True
+        except Exception:
+            return False
+
+    def device_config(self) -> tuple:
+        if torch.cuda.is_available():
+            i_device = int(self.device.split(":")[-1])
+            self.gpu_name = torch.cuda.get_device_name(i_device)
+            if (
+                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
+                or "P40" in self.gpu_name.upper()
+                or "1060" in self.gpu_name
+                or "1070" in self.gpu_name
+                or "1080" in self.gpu_name
+            ):
+                print("INFO: Found GPU", self.gpu_name, ", force to fp32")
+                self.is_half = False
+            else:
+                print("INFO: Found GPU", self.gpu_name)
+            self.gpu_mem = int(
+                torch.cuda.get_device_properties(i_device).total_memory
+                / 1024
+                / 1024
+                / 1024
+                + 0.4
+            )
+        elif self.has_mps():
+            print("INFO: No supported Nvidia GPU found, use MPS instead")
+            self.device = "mps"
+            self.is_half = False
+        else:
+            print("INFO: No supported Nvidia GPU found, use CPU instead")
+            self.device = "cpu"
+            self.is_half = False
+
+        if self.n_cpu == 0:
+            self.n_cpu = cpu_count()
+
+        if self.is_half:
+            # Settings for 6 GB of VRAM
+            x_pad = 3
+            x_query = 10
+            x_center = 60
+            x_max = 65
+        else:
+            # Settings for 5 GB of VRAM
+            x_pad = 1
+            x_query = 6
+            x_center = 38
+            x_max = 41
+
+        if self.gpu_mem is not None and self.gpu_mem <= 4:
+            x_pad = 1
+            x_query = 5
+            x_center = 30
+            x_max = 32
+
+        return x_pad, x_query, x_center, x_max
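For reference, the --api flag in the Dockerfile's CMD lands in this parser. A minimal sketch of inspecting the resolved settings, assuming Config is imported from this module:

from config import Config

# Run as: python3 app.py --api
config = Config()
print(config.device, config.is_half, config.api)
# e.g. "cpu False True" on a machine with no supported GPU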
BanG-Dream-MyGO/edgetts_db.py ADDED
@@ -0,0 +1,232 @@
+tts_order_voice = {
+    'English-Jenny (Female)': 'en-US-JennyNeural',
+    'English-Guy (Male)': 'en-US-GuyNeural',
+    'English-Ana (Female)': 'en-US-AnaNeural',
+    'English-Aria (Female)': 'en-US-AriaNeural',
+    'English-Christopher (Male)': 'en-US-ChristopherNeural',
+    'English-Eric (Male)': 'en-US-EricNeural',
+    'English-Michelle (Female)': 'en-US-MichelleNeural',
+    'English-Roger (Male)': 'en-US-RogerNeural',
+    'Spanish (Mexican)-Dalia (Female)': 'es-MX-DaliaNeural',
+    'Spanish (Mexican)-Jorge- (Male)': 'es-MX-JorgeNeural',
+    'Korean-Sun-Hi- (Female)': 'ko-KR-SunHiNeural',
+    'Korean-InJoon- (Male)': 'ko-KR-InJoonNeural',
+    'Thai-Premwadee- (Female)': 'th-TH-PremwadeeNeural',
+    'Thai-Niwat- (Male)': 'th-TH-NiwatNeural',
+    'Vietnamese-HoaiMy- (Female)': 'vi-VN-HoaiMyNeural',
+    'Vietnamese-NamMinh- (Male)': 'vi-VN-NamMinhNeural',
+    'Japanese-Nanami- (Female)': 'ja-JP-NanamiNeural',
+    'Japanese-Keita- (Male)': 'ja-JP-KeitaNeural',
+    'French-Denise- (Female)': 'fr-FR-DeniseNeural',
+    'French-Eloise- (Female)': 'fr-FR-EloiseNeural',
+    'French-Henri- (Male)': 'fr-FR-HenriNeural',
+    'Brazilian-Francisca- (Female)': 'pt-BR-FranciscaNeural',
+    'Brazilian-Antonio- (Male)': 'pt-BR-AntonioNeural',
+    'Indonesian-Ardi- (Male)': 'id-ID-ArdiNeural',
+    'Indonesian-Gadis- (Female)': 'id-ID-GadisNeural',
+    'Hebrew-Avri- (Male)': 'he-IL-AvriNeural',
+    'Hebrew-Hila- (Female)': 'he-IL-HilaNeural',
+    'Italian-Isabella- (Female)': 'it-IT-IsabellaNeural',
+    'Italian-Diego- (Male)': 'it-IT-DiegoNeural',
+    'Italian-Elsa- (Female)': 'it-IT-ElsaNeural',
+    'Dutch-Colette- (Female)': 'nl-NL-ColetteNeural',
+    'Dutch-Fenna- (Female)': 'nl-NL-FennaNeural',
+    'Dutch-Maarten- (Male)': 'nl-NL-MaartenNeural',
+    'Malay-Osman- (Male)': 'ms-MY-OsmanNeural',
+    'Malay-Yasmin- (Female)': 'ms-MY-YasminNeural',
+    'Norwegian-Pernille- (Female)': 'nb-NO-PernilleNeural',
+    'Norwegian-Finn- (Male)': 'nb-NO-FinnNeural',
+    'Swedish-Sofie- (Female)': 'sv-SE-SofieNeural',
+    'Swedish-Mattias- (Male)': 'sv-SE-MattiasNeural',
+    'Arabic-Hamed- (Male)': 'ar-SA-HamedNeural',
+    'Arabic-Zariyah- (Female)': 'ar-SA-ZariyahNeural',
+    'Greek-Athina- (Female)': 'el-GR-AthinaNeural',
+    'Greek-Nestoras- (Male)': 'el-GR-NestorasNeural',
+    'German-Katja- (Female)': 'de-DE-KatjaNeural',
+    'German-Amala- (Female)': 'de-DE-AmalaNeural',
+    'German-Conrad- (Male)': 'de-DE-ConradNeural',
+    'German-Killian- (Male)': 'de-DE-KillianNeural',
+    'Afrikaans-Adri- (Female)': 'af-ZA-AdriNeural',
+    'Afrikaans-Willem- (Male)': 'af-ZA-WillemNeural',
+    'Ethiopian-Ameha- (Male)': 'am-ET-AmehaNeural',
+    'Ethiopian-Mekdes- (Female)': 'am-ET-MekdesNeural',
+    'Arabic (UAE)-Fatima- (Female)': 'ar-AE-FatimaNeural',
+    'Arabic (UAE)-Hamdan- (Male)': 'ar-AE-HamdanNeural',
+    'Arabic (Bahrain)-Ali- (Male)': 'ar-BH-AliNeural',
+    'Arabic (Bahrain)-Laila- (Female)': 'ar-BH-LailaNeural',
+    'Arabic (Algeria)-Ismael- (Male)': 'ar-DZ-IsmaelNeural',
+    'Arabic (Egypt)-Salma- (Female)': 'ar-EG-SalmaNeural',
+    'Arabic (Egypt)-Shakir- (Male)': 'ar-EG-ShakirNeural',
+    'Arabic (Iraq)-Bassel- (Male)': 'ar-IQ-BasselNeural',
+    'Arabic (Iraq)-Rana- (Female)': 'ar-IQ-RanaNeural',
+    'Arabic (Jordan)-Sana- (Female)': 'ar-JO-SanaNeural',
+    'Arabic (Jordan)-Taim- (Male)': 'ar-JO-TaimNeural',
+    'Arabic (Kuwait)-Fahed- (Male)': 'ar-KW-FahedNeural',
+    'Arabic (Kuwait)-Noura- (Female)': 'ar-KW-NouraNeural',
+    'Arabic (Lebanon)-Layla- (Female)': 'ar-LB-LaylaNeural',
+    'Arabic (Lebanon)-Rami- (Male)': 'ar-LB-RamiNeural',
+    'Arabic (Libya)-Iman- (Female)': 'ar-LY-ImanNeural',
+    'Arabic (Libya)-Omar- (Male)': 'ar-LY-OmarNeural',
+    'Arabic (Morocco)-Jamal- (Male)': 'ar-MA-JamalNeural',
+    'Arabic (Morocco)-Mouna- (Female)': 'ar-MA-MounaNeural',
+    'Arabic (Oman)-Abdullah- (Male)': 'ar-OM-AbdullahNeural',
+    'Arabic (Oman)-Aysha- (Female)': 'ar-OM-AyshaNeural',
+    'Arabic (Qatar)-Amal- (Female)': 'ar-QA-AmalNeural',
+    'Arabic (Qatar)-Moaz- (Male)': 'ar-QA-MoazNeural',
+    'Arabic (Syrian Arab Republic)-Amany- (Female)': 'ar-SY-AmanyNeural',
+    'Arabic (Syrian Arab Republic)-Laith- (Male)': 'ar-SY-LaithNeural',
+    'Arabic (Tunisia)-Hedi- (Male)': 'ar-TN-HediNeural',
+    'Arabic (Tunisia)-Reem- (Female)': 'ar-TN-ReemNeural',
+    'Arabic (Yemen)-Maryam- (Female)': 'ar-YE-MaryamNeural',
+    'Arabic (Yemen)-Saleh- (Male)': 'ar-YE-SalehNeural',
+    'Azerbaijani-Babek- (Male)': 'az-AZ-BabekNeural',
+    'Azerbaijani-Banu- (Female)': 'az-AZ-BanuNeural',
+    'Bulgarian-Borislav- (Male)': 'bg-BG-BorislavNeural',
+    'Bulgarian-Kalina- (Female)': 'bg-BG-KalinaNeural',
+    'Bengali (Bangladesh)-Nabanita- (Female)': 'bn-BD-NabanitaNeural',
+    'Bengali (Bangladesh)-Pradeep- (Male)': 'bn-BD-PradeepNeural',
+    'Bengali (India)-Bashkar- (Male)': 'bn-IN-BashkarNeural',
+    'Bengali (India)-Tanishaa- (Female)': 'bn-IN-TanishaaNeural',
+    'Bosnian (Bosnia and Herzegovina)-Goran- (Male)': 'bs-BA-GoranNeural',
+    'Bosnian (Bosnia and Herzegovina)-Vesna- (Female)': 'bs-BA-VesnaNeural',
+    'Catalan (Spain)-Joana- (Female)': 'ca-ES-JoanaNeural',
+    'Catalan (Spain)-Enric- (Male)': 'ca-ES-EnricNeural',
+    'Czech (Czech Republic)-Antonin- (Male)': 'cs-CZ-AntoninNeural',
+    'Czech (Czech Republic)-Vlasta- (Female)': 'cs-CZ-VlastaNeural',
+    'Welsh (UK)-Aled- (Male)': 'cy-GB-AledNeural',
+    'Welsh (UK)-Nia- (Female)': 'cy-GB-NiaNeural',
+    'Danish (Denmark)-Christel- (Female)': 'da-DK-ChristelNeural',
+    'Danish (Denmark)-Jeppe- (Male)': 'da-DK-JeppeNeural',
+    'German (Austria)-Ingrid- (Female)': 'de-AT-IngridNeural',
+    'German (Austria)-Jonas- (Male)': 'de-AT-JonasNeural',
+    'German (Switzerland)-Jan- (Male)': 'de-CH-JanNeural',
+    'German (Switzerland)-Leni- (Female)': 'de-CH-LeniNeural',
+    'English (Australia)-Natasha- (Female)': 'en-AU-NatashaNeural',
+    'English (Australia)-William- (Male)': 'en-AU-WilliamNeural',
+    'English (Canada)-Clara- (Female)': 'en-CA-ClaraNeural',
+    'English (Canada)-Liam- (Male)': 'en-CA-LiamNeural',
+    'English (UK)-Libby- (Female)': 'en-GB-LibbyNeural',
+    'English (UK)-Maisie- (Female)': 'en-GB-MaisieNeural',
+    'English (UK)-Ryan- (Male)': 'en-GB-RyanNeural',
+    'English (UK)-Sonia- (Female)': 'en-GB-SoniaNeural',
+    'English (UK)-Thomas- (Male)': 'en-GB-ThomasNeural',
+    'English (Hong Kong)-Sam- (Male)': 'en-HK-SamNeural',
+    'English (Hong Kong)-Yan- (Female)': 'en-HK-YanNeural',
+    'English (Ireland)-Connor- (Male)': 'en-IE-ConnorNeural',
+    'English (Ireland)-Emily- (Female)': 'en-IE-EmilyNeural',
+    'English (India)-Neerja- (Female)': 'en-IN-NeerjaNeural',
+    'English (India)-Prabhat- (Male)': 'en-IN-PrabhatNeural',
+    'English (Kenya)-Asilia- (Female)': 'en-KE-AsiliaNeural',
+    'English (Kenya)-Chilemba- (Male)': 'en-KE-ChilembaNeural',
+    'English (Nigeria)-Abeo- (Male)': 'en-NG-AbeoNeural',
+    'English (Nigeria)-Ezinne- (Female)': 'en-NG-EzinneNeural',
+    'English (New Zealand)-Mitchell- (Male)': 'en-NZ-MitchellNeural',
+    'English (Philippines)-James- (Male)': 'en-PH-JamesNeural',
+    'English (Philippines)-Rosa- (Female)': 'en-PH-RosaNeural',
+    'English (Singapore)-Luna- (Female)': 'en-SG-LunaNeural',
+    'English (Singapore)-Wayne- (Male)': 'en-SG-WayneNeural',
+    'English (Tanzania)-Elimu- (Male)': 'en-TZ-ElimuNeural',
+    'English (Tanzania)-Imani- (Female)': 'en-TZ-ImaniNeural',
+    'English (South Africa)-Leah- (Female)': 'en-ZA-LeahNeural',
+    'English (South Africa)-Luke- (Male)': 'en-ZA-LukeNeural',
+    'Spanish (Argentina)-Elena- (Female)': 'es-AR-ElenaNeural',
+    'Spanish (Argentina)-Tomas- (Male)': 'es-AR-TomasNeural',
+    'Spanish (Bolivia)-Marcelo- (Male)': 'es-BO-MarceloNeural',
+    'Spanish (Bolivia)-Sofia- (Female)': 'es-BO-SofiaNeural',
+    'Spanish (Colombia)-Gonzalo- (Male)': 'es-CO-GonzaloNeural',
+    'Spanish (Colombia)-Salome- (Female)': 'es-CO-SalomeNeural',
+    'Spanish (Costa Rica)-Juan- (Male)': 'es-CR-JuanNeural',
+    'Spanish (Costa Rica)-Maria- (Female)': 'es-CR-MariaNeural',
+    'Spanish (Cuba)-Belkys- (Female)': 'es-CU-BelkysNeural',
+    'Spanish (Dominican Republic)-Emilio- (Male)': 'es-DO-EmilioNeural',
+    'Spanish (Dominican Republic)-Ramona- (Female)': 'es-DO-RamonaNeural',
+    'Spanish (Ecuador)-Andrea- (Female)': 'es-EC-AndreaNeural',
+    'Spanish (Ecuador)-Luis- (Male)': 'es-EC-LuisNeural',
+    'Spanish (Spain)-Alvaro- (Male)': 'es-ES-AlvaroNeural',
+    'Spanish (Spain)-Elvira- (Female)': 'es-ES-ElviraNeural',
+    'Spanish (Equatorial Guinea)-Teresa- (Female)': 'es-GQ-TeresaNeural',
+    'Spanish (Guatemala)-Andres- (Male)': 'es-GT-AndresNeural',
+    'Spanish (Guatemala)-Marta- (Female)': 'es-GT-MartaNeural',
+    'Spanish (Honduras)-Carlos- (Male)': 'es-HN-CarlosNeural',
+    'Spanish (Honduras)-Karla- (Female)': 'es-HN-KarlaNeural',
+    'Spanish (Nicaragua)-Federico- (Male)': 'es-NI-FedericoNeural',
+    'Spanish (Nicaragua)-Yolanda- (Female)': 'es-NI-YolandaNeural',
+    'Spanish (Panama)-Margarita- (Female)': 'es-PA-MargaritaNeural',
+    'Spanish (Panama)-Roberto- (Male)': 'es-PA-RobertoNeural',
+    'Spanish (Peru)-Alex- (Male)': 'es-PE-AlexNeural',
+    'Spanish (Peru)-Camila- (Female)': 'es-PE-CamilaNeural',
+    'Spanish (Puerto Rico)-Karina- (Female)': 'es-PR-KarinaNeural',
+    'Spanish (Puerto Rico)-Victor- (Male)': 'es-PR-VictorNeural',
+    'Spanish (Paraguay)-Mario- (Male)': 'es-PY-MarioNeural',
+    'Spanish (Paraguay)-Tania- (Female)': 'es-PY-TaniaNeural',
+    'Spanish (El Salvador)-Lorena- (Female)': 'es-SV-LorenaNeural',
+    'Spanish (El Salvador)-Rodrigo- (Male)': 'es-SV-RodrigoNeural',
+    'Spanish (United States)-Alonso- (Male)': 'es-US-AlonsoNeural',
+    'Spanish (United States)-Paloma- (Female)': 'es-US-PalomaNeural',
+    'Spanish (Uruguay)-Mateo- (Male)': 'es-UY-MateoNeural',
+    'Spanish (Uruguay)-Valentina- (Female)': 'es-UY-ValentinaNeural',
+    'Spanish (Venezuela)-Paola- (Female)': 'es-VE-PaolaNeural',
+    'Spanish (Venezuela)-Sebastian- (Male)': 'es-VE-SebastianNeural',
+    'Estonian (Estonia)-Anu- (Female)': 'et-EE-AnuNeural',
+    'Estonian (Estonia)-Kert- (Male)': 'et-EE-KertNeural',
+    'Persian (Iran)-Dilara- (Female)': 'fa-IR-DilaraNeural',
+    'Persian (Iran)-Farid- (Male)': 'fa-IR-FaridNeural',
+    'Finnish (Finland)-Harri- (Male)': 'fi-FI-HarriNeural',
+    'Finnish (Finland)-Noora- (Female)': 'fi-FI-NooraNeural',
+    'French (Belgium)-Charline- (Female)': 'fr-BE-CharlineNeural',
+    'French (Belgium)-Gerard- (Male)': 'fr-BE-GerardNeural',
+    'French (Canada)-Sylvie- (Female)': 'fr-CA-SylvieNeural',
+    'French (Canada)-Antoine- (Male)': 'fr-CA-AntoineNeural',
+    'French (Canada)-Jean- (Male)': 'fr-CA-JeanNeural',
+    'French (Switzerland)-Ariane- (Female)': 'fr-CH-ArianeNeural',
+    'French (Switzerland)-Fabrice- (Male)': 'fr-CH-FabriceNeural',
+    'Irish (Ireland)-Colm- (Male)': 'ga-IE-ColmNeural',
+    'Irish (Ireland)-Orla- (Female)': 'ga-IE-OrlaNeural',
+    'Galician (Spain)-Roi- (Male)': 'gl-ES-RoiNeural',
+    'Galician (Spain)-Sabela- (Female)': 'gl-ES-SabelaNeural',
+    'Gujarati (India)-Dhwani- (Female)': 'gu-IN-DhwaniNeural',
+    'Gujarati (India)-Niranjan- (Male)': 'gu-IN-NiranjanNeural',
+    'Hindi (India)-Madhur- (Male)': 'hi-IN-MadhurNeural',
+    'Hindi (India)-Swara- (Female)': 'hi-IN-SwaraNeural',
+    'Croatian (Croatia)-Gabrijela- (Female)': 'hr-HR-GabrijelaNeural',
+    'Croatian (Croatia)-Srecko- (Male)': 'hr-HR-SreckoNeural',
+    'Hungarian (Hungary)-Noemi- (Female)': 'hu-HU-NoemiNeural',
+    'Hungarian (Hungary)-Tamas- (Male)': 'hu-HU-TamasNeural',
+    'Icelandic (Iceland)-Gudrun- (Female)': 'is-IS-GudrunNeural',
+    'Icelandic (Iceland)-Gunnar- (Male)': 'is-IS-GunnarNeural',
+    'Javanese (Indonesia)-Dimas- (Male)': 'jv-ID-DimasNeural',
+    'Javanese (Indonesia)-Siti- (Female)': 'jv-ID-SitiNeural',
+    'Georgian (Georgia)-Eka- (Female)': 'ka-GE-EkaNeural',
+    'Georgian (Georgia)-Giorgi- (Male)': 'ka-GE-GiorgiNeural',
+    'Kazakh (Kazakhstan)-Aigul- (Female)': 'kk-KZ-AigulNeural',
+    'Kazakh (Kazakhstan)-Daulet- (Male)': 'kk-KZ-DauletNeural',
+    'Khmer (Cambodia)-Piseth- (Male)': 'km-KH-PisethNeural',
+    'Khmer (Cambodia)-Sreymom- (Female)': 'km-KH-SreymomNeural',
+    'Kannada (India)-Gagan- (Male)': 'kn-IN-GaganNeural',
+    'Kannada (India)-Sapna- (Female)': 'kn-IN-SapnaNeural',
+    'Lao (Laos)-Chanthavong- (Male)': 'lo-LA-ChanthavongNeural',
+    'Lao (Laos)-Keomany- (Female)': 'lo-LA-KeomanyNeural',
+    'Lithuanian (Lithuania)-Leonas- (Male)': 'lt-LT-LeonasNeural',
+    'Lithuanian (Lithuania)-Ona- (Female)': 'lt-LT-OnaNeural',
+    'Latvian (Latvia)-Everita- (Female)': 'lv-LV-EveritaNeural',
+    'Latvian (Latvia)-Nils- (Male)': 'lv-LV-NilsNeural',
+    'Macedonian (North Macedonia)-Aleksandar- (Male)': 'mk-MK-AleksandarNeural',
+    'Macedonian (North Macedonia)-Marija- (Female)': 'mk-MK-MarijaNeural',
+    'Malayalam (India)-Midhun- (Male)': 'ml-IN-MidhunNeural',
+    'Malayalam (India)-Sobhana- (Female)': 'ml-IN-SobhanaNeural',
+    'Mongolian (Mongolia)-Bataa- (Male)': 'mn-MN-BataaNeural',
+    'Mongolian (Mongolia)-Yesui- (Female)': 'mn-MN-YesuiNeural',
+    'Marathi (India)-Aarohi- (Female)': 'mr-IN-AarohiNeural',
+    'Marathi (India)-Manohar- (Male)': 'mr-IN-ManoharNeural',
+    'Maltese (Malta)-Grace- (Female)': 'mt-MT-GraceNeural',
+    'Maltese (Malta)-Joseph- (Male)': 'mt-MT-JosephNeural',
+    'Burmese (Myanmar)-Nilar- (Female)': 'my-MM-NilarNeural',
+    'Burmese (Myanmar)-Thiha- (Male)': 'my-MM-ThihaNeural',
+    'Nepali (Nepal)-Hemkala- (Female)': 'ne-NP-HemkalaNeural',
+    'Nepali (Nepal)-Sagar- (Male)': 'ne-NP-SagarNeural',
+    'Dutch (Belgium)-Arnaud- (Male)': 'nl-BE-ArnaudNeural',
+    'Dutch (Belgium)-Dena- (Female)': 'nl-BE-DenaNeural',
+    'Polish (Poland)-Marek- (Male)': 'pl-PL-MarekNeural',
+    'Polish (Poland)-Zofia- (Female)': 'pl-PL-ZofiaNeural',
+    'Pashto (Afghanistan)-Gul Nawaz- (Male)': 'ps-AF-Gul',
+}
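app.py currently hard-codes ja-JP-NanamiNeural, but this table maps human-readable labels to edge-tts voice IDs. A minimal sketch of consuming it, using the same edge_tts call app.py makes:

import asyncio
import edge_tts
from edgetts_db import tts_order_voice

voice = tts_order_voice['English-Jenny (Female)']  # -> 'en-US-JennyNeural'
asyncio.run(edge_tts.Communicate("Hello from MyGO!", voice).save("tts_out.mp3"))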
BanG-Dream-MyGO/hubert_base.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f54b40fd2802423a5643779c4861af1e9ee9c1564dc9d32f54f20b5ffba7db96
+size 189507909
BanG-Dream-MyGO/lib/infer_pack/attentions.py ADDED
@@ -0,0 +1,417 @@
+import copy
+import math
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from lib.infer_pack import commons
+from lib.infer_pack import modules
+from lib.infer_pack.modules import LayerNorm
+
+
+class Encoder(nn.Module):
+    def __init__(
+        self,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size=1,
+        p_dropout=0.0,
+        window_size=10,
+        **kwargs
+    ):
+        super().__init__()
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.window_size = window_size
+
+        self.drop = nn.Dropout(p_dropout)
+        self.attn_layers = nn.ModuleList()
+        self.norm_layers_1 = nn.ModuleList()
+        self.ffn_layers = nn.ModuleList()
+        self.norm_layers_2 = nn.ModuleList()
+        for i in range(self.n_layers):
+            self.attn_layers.append(
+                MultiHeadAttention(
+                    hidden_channels,
+                    hidden_channels,
+                    n_heads,
+                    p_dropout=p_dropout,
+                    window_size=window_size,
+                )
+            )
+            self.norm_layers_1.append(LayerNorm(hidden_channels))
+            self.ffn_layers.append(
+                FFN(
+                    hidden_channels,
+                    hidden_channels,
+                    filter_channels,
+                    kernel_size,
+                    p_dropout=p_dropout,
+                )
+            )
+            self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+    def forward(self, x, x_mask):
+        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+        x = x * x_mask
+        for i in range(self.n_layers):
+            y = self.attn_layers[i](x, x, attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_1[i](x + y)
+
+            y = self.ffn_layers[i](x, x_mask)
+            y = self.drop(y)
+            x = self.norm_layers_2[i](x + y)
+        x = x * x_mask
+        return x
+
+
+class Decoder(nn.Module):
+    def __init__(
+        self,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size=1,
+        p_dropout=0.0,
+        proximal_bias=False,
+        proximal_init=True,
+        **kwargs
+    ):
+        super().__init__()
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.proximal_bias = proximal_bias
+        self.proximal_init = proximal_init
+
+        self.drop = nn.Dropout(p_dropout)
+        self.self_attn_layers = nn.ModuleList()
+        self.norm_layers_0 = nn.ModuleList()
+        self.encdec_attn_layers = nn.ModuleList()
+        self.norm_layers_1 = nn.ModuleList()
+        self.ffn_layers = nn.ModuleList()
+        self.norm_layers_2 = nn.ModuleList()
+        for i in range(self.n_layers):
+            self.self_attn_layers.append(
+                MultiHeadAttention(
+                    hidden_channels,
+                    hidden_channels,
+                    n_heads,
+                    p_dropout=p_dropout,
+                    proximal_bias=proximal_bias,
+                    proximal_init=proximal_init,
+                )
+            )
+            self.norm_layers_0.append(LayerNorm(hidden_channels))
+            self.encdec_attn_layers.append(
+                MultiHeadAttention(
+                    hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
+                )
+            )
+            self.norm_layers_1.append(LayerNorm(hidden_channels))
+            self.ffn_layers.append(
+                FFN(
+                    hidden_channels,
+                    hidden_channels,
+                    filter_channels,
+                    kernel_size,
+                    p_dropout=p_dropout,
+                    causal=True,
+                )
+            )
+            self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+    def forward(self, x, x_mask, h, h_mask):
+        """
+        x: decoder input
+        h: encoder output
+        """
+        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
+            device=x.device, dtype=x.dtype
+        )
+        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+        x = x * x_mask
+        for i in range(self.n_layers):
+            y = self.self_attn_layers[i](x, x, self_attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_0[i](x + y)
+
+            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_1[i](x + y)
+
+            y = self.ffn_layers[i](x, x_mask)
+            y = self.drop(y)
+            x = self.norm_layers_2[i](x + y)
+        x = x * x_mask
+        return x
+
+
+class MultiHeadAttention(nn.Module):
+    def __init__(
+        self,
+        channels,
+        out_channels,
+        n_heads,
+        p_dropout=0.0,
+        window_size=None,
+        heads_share=True,
+        block_length=None,
+        proximal_bias=False,
+        proximal_init=False,
+    ):
+        super().__init__()
+        assert channels % n_heads == 0
+
+        self.channels = channels
+        self.out_channels = out_channels
+        self.n_heads = n_heads
+        self.p_dropout = p_dropout
+        self.window_size = window_size
+        self.heads_share = heads_share
+        self.block_length = block_length
+        self.proximal_bias = proximal_bias
+        self.proximal_init = proximal_init
+        self.attn = None
+
+        self.k_channels = channels // n_heads
+        self.conv_q = nn.Conv1d(channels, channels, 1)
+        self.conv_k = nn.Conv1d(channels, channels, 1)
+        self.conv_v = nn.Conv1d(channels, channels, 1)
+        self.conv_o = nn.Conv1d(channels, out_channels, 1)
+        self.drop = nn.Dropout(p_dropout)
+
+        if window_size is not None:
+            n_heads_rel = 1 if heads_share else n_heads
+            rel_stddev = self.k_channels**-0.5
+            self.emb_rel_k = nn.Parameter(
+                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                * rel_stddev
+            )
+            self.emb_rel_v = nn.Parameter(
+                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                * rel_stddev
+            )
+
+        nn.init.xavier_uniform_(self.conv_q.weight)
+        nn.init.xavier_uniform_(self.conv_k.weight)
+        nn.init.xavier_uniform_(self.conv_v.weight)
+        if proximal_init:
+            with torch.no_grad():
+                self.conv_k.weight.copy_(self.conv_q.weight)
+                self.conv_k.bias.copy_(self.conv_q.bias)
+
+    def forward(self, x, c, attn_mask=None):
+        q = self.conv_q(x)
+        k = self.conv_k(c)
+        v = self.conv_v(c)
+
+        x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+        x = self.conv_o(x)
+        return x
+
+    def attention(self, query, key, value, mask=None):
+        # reshape [b, d, t] -> [b, n_h, t, d_k]
+        b, d, t_s, t_t = (*key.size(), query.size(2))
+        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+        if self.window_size is not None:
+            assert (
+                t_s == t_t
+            ), "Relative attention is only available for self-attention."
+            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
239
+ rel_logits = self._matmul_with_relative_keys(
240
+ query / math.sqrt(self.k_channels), key_relative_embeddings
241
+ )
242
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
243
+ scores = scores + scores_local
244
+ if self.proximal_bias:
245
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
246
+ scores = scores + self._attention_bias_proximal(t_s).to(
247
+ device=scores.device, dtype=scores.dtype
248
+ )
249
+ if mask is not None:
250
+ scores = scores.masked_fill(mask == 0, -1e4)
251
+ if self.block_length is not None:
252
+ assert (
253
+ t_s == t_t
254
+ ), "Local attention is only available for self-attention."
255
+ block_mask = (
256
+ torch.ones_like(scores)
257
+ .triu(-self.block_length)
258
+ .tril(self.block_length)
259
+ )
260
+ scores = scores.masked_fill(block_mask == 0, -1e4)
261
+ p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
262
+ p_attn = self.drop(p_attn)
263
+ output = torch.matmul(p_attn, value)
264
+ if self.window_size is not None:
265
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
266
+ value_relative_embeddings = self._get_relative_embeddings(
267
+ self.emb_rel_v, t_s
268
+ )
269
+ output = output + self._matmul_with_relative_values(
270
+ relative_weights, value_relative_embeddings
271
+ )
272
+ output = (
273
+ output.transpose(2, 3).contiguous().view(b, d, t_t)
274
+ ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
275
+ return output, p_attn
276
+
277
+ def _matmul_with_relative_values(self, x, y):
278
+ """
279
+ x: [b, h, l, m]
280
+ y: [h or 1, m, d]
281
+ ret: [b, h, l, d]
282
+ """
283
+ ret = torch.matmul(x, y.unsqueeze(0))
284
+ return ret
285
+
286
+ def _matmul_with_relative_keys(self, x, y):
287
+ """
288
+ x: [b, h, l, d]
289
+ y: [h or 1, m, d]
290
+ ret: [b, h, l, m]
291
+ """
292
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
293
+ return ret
294
+
295
+ def _get_relative_embeddings(self, relative_embeddings, length):
296
+ max_relative_position = 2 * self.window_size + 1
297
+ # Pad first, then slice, to avoid conditional ops.
298
+ pad_length = max(length - (self.window_size + 1), 0)
299
+ slice_start_position = max((self.window_size + 1) - length, 0)
300
+ slice_end_position = slice_start_position + 2 * length - 1
301
+ if pad_length > 0:
302
+ padded_relative_embeddings = F.pad(
303
+ relative_embeddings,
304
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
305
+ )
306
+ else:
307
+ padded_relative_embeddings = relative_embeddings
308
+ used_relative_embeddings = padded_relative_embeddings[
309
+ :, slice_start_position:slice_end_position
310
+ ]
311
+ return used_relative_embeddings
312
+
313
+ def _relative_position_to_absolute_position(self, x):
314
+ """
315
+ x: [b, h, l, 2*l-1]
316
+ ret: [b, h, l, l]
317
+ """
318
+ batch, heads, length, _ = x.size()
319
+ # Concat columns of pad to shift from relative to absolute indexing.
320
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
321
+
322
+ # Concat extra elements so as to add up to shape (len+1, 2*len-1).
323
+ x_flat = x.view([batch, heads, length * 2 * length])
324
+ x_flat = F.pad(
325
+ x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
326
+ )
327
+
328
+ # Reshape and slice out the padded elements.
329
+ x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
330
+ :, :, :length, length - 1 :
331
+ ]
332
+ return x_final
333
+
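+ # Worked example of the pad-and-reshape trick above (added for clarity, not
+ # in the original): for length l=2 the input has shape [b, h, 2, 3]. Padding
+ # one column gives [b, h, 2, 4]; flattening gives 8 elements per head, and
+ # padding l-1=1 more gives 9 = (l+1)*(2l-1). Viewing that as [b, h, 3, 3]
+ # and slicing [:, :, :2, 1:] leaves the absolute-position scores [b, h, 2, 2].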
334
+ def _absolute_position_to_relative_position(self, x):
335
+ """
336
+ x: [b, h, l, l]
337
+ ret: [b, h, l, 2*l-1]
338
+ """
339
+ batch, heads, length, _ = x.size()
340
+ # pad along the column (last) dimension
341
+ x = F.pad(
342
+ x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
343
+ )
344
+ x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
345
+ # add zeros at the beginning that shift the elements after the reshape
346
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
347
+ x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
348
+ return x_final
349
+
350
+ def _attention_bias_proximal(self, length):
351
+ """Bias for self-attention to encourage attention to close positions.
352
+ Args:
353
+ length: an integer scalar.
354
+ Returns:
355
+ a Tensor with shape [1, 1, length, length]
356
+ """
357
+ r = torch.arange(length, dtype=torch.float32)
358
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
359
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
360
+
361
+
362
+ class FFN(nn.Module):
363
+ def __init__(
364
+ self,
365
+ in_channels,
366
+ out_channels,
367
+ filter_channels,
368
+ kernel_size,
369
+ p_dropout=0.0,
370
+ activation=None,
371
+ causal=False,
372
+ ):
373
+ super().__init__()
374
+ self.in_channels = in_channels
375
+ self.out_channels = out_channels
376
+ self.filter_channels = filter_channels
377
+ self.kernel_size = kernel_size
378
+ self.p_dropout = p_dropout
379
+ self.activation = activation
380
+ self.causal = causal
381
+
382
+ if causal:
383
+ self.padding = self._causal_padding
384
+ else:
385
+ self.padding = self._same_padding
386
+
387
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
388
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
389
+ self.drop = nn.Dropout(p_dropout)
390
+
391
+ def forward(self, x, x_mask):
392
+ x = self.conv_1(self.padding(x * x_mask))
393
+ if self.activation == "gelu":
394
+ x = x * torch.sigmoid(1.702 * x)
395
+ else:
396
+ x = torch.relu(x)
397
+ x = self.drop(x)
398
+ x = self.conv_2(self.padding(x * x_mask))
399
+ return x * x_mask
400
+
401
+ def _causal_padding(self, x):
402
+ if self.kernel_size == 1:
403
+ return x
404
+ pad_l = self.kernel_size - 1
405
+ pad_r = 0
406
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
407
+ x = F.pad(x, commons.convert_pad_shape(padding))
408
+ return x
409
+
410
+ def _same_padding(self, x):
411
+ if self.kernel_size == 1:
412
+ return x
413
+ pad_l = (self.kernel_size - 1) // 2
414
+ pad_r = self.kernel_size // 2
415
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
416
+ x = F.pad(x, commons.convert_pad_shape(padding))
417
+ return x
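+ # Padding sketch (illustrative, assuming kernel_size=3, not in the original
+ # file): _causal_padding pads 2 frames on the left only, so the convolution
+ # never sees future frames, while _same_padding pads 1 frame on each side to
+ # keep the output length equal to the input length.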
BanG-Dream-MyGO/lib/infer_pack/commons.py ADDED
@@ -0,0 +1,166 @@
1
+ import math
2
+ import numpy as np
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+
8
+ def init_weights(m, mean=0.0, std=0.01):
9
+ classname = m.__class__.__name__
10
+ if classname.find("Conv") != -1:
11
+ m.weight.data.normal_(mean, std)
12
+
13
+
14
+ def get_padding(kernel_size, dilation=1):
15
+ return int((kernel_size * dilation - dilation) / 2)
16
+
17
+
18
+ def convert_pad_shape(pad_shape):
19
+ l = pad_shape[::-1]
20
+ pad_shape = [item for sublist in l for item in sublist]
21
+ return pad_shape
22
+
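+ # Example (added for clarity): F.pad expects pad amounts last-dimension
+ # first, so convert_pad_shape([[0, 0], [0, 0], [1, 2]]) == [1, 2, 0, 0, 0, 0].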
23
+
24
+ def kl_divergence(m_p, logs_p, m_q, logs_q):
25
+ """KL(P||Q)"""
26
+ kl = (logs_q - logs_p) - 0.5
27
+ kl += (
28
+ 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
29
+ )
30
+ return kl
31
+
32
+
33
+ def rand_gumbel(shape):
34
+ """Sample from the Gumbel distribution, protect from overflows."""
35
+ uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
36
+ return -torch.log(-torch.log(uniform_samples))
37
+
38
+
39
+ def rand_gumbel_like(x):
40
+ g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
41
+ return g
42
+
43
+
44
+ def slice_segments(x, ids_str, segment_size=4):
45
+ ret = torch.zeros_like(x[:, :, :segment_size])
46
+ for i in range(x.size(0)):
47
+ idx_str = ids_str[i]
48
+ idx_end = idx_str + segment_size
49
+ ret[i] = x[i, :, idx_str:idx_end]
50
+ return ret
51
+
52
+
53
+ def slice_segments2(x, ids_str, segment_size=4):
54
+ ret = torch.zeros_like(x[:, :segment_size])
55
+ for i in range(x.size(0)):
56
+ idx_str = ids_str[i]
57
+ idx_end = idx_str + segment_size
58
+ ret[i] = x[i, idx_str:idx_end]
59
+ return ret
60
+
61
+
62
+ def rand_slice_segments(x, x_lengths=None, segment_size=4):
63
+ b, d, t = x.size()
64
+ if x_lengths is None:
65
+ x_lengths = t
66
+ ids_str_max = x_lengths - segment_size + 1
67
+ ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
68
+ ret = slice_segments(x, ids_str, segment_size)
69
+ return ret, ids_str
70
+
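+ # Sketch (hypothetical shapes): for x of shape [B, C, T] and x_lengths of
+ # shape [B], rand_slice_segments returns a [B, C, segment_size] slice per
+ # batch item plus the random start indices, e.g.
+ #     z_slice, ids_str = rand_slice_segments(x, x_lengths, segment_size=32)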
71
+
72
+ def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
73
+ position = torch.arange(length, dtype=torch.float)
74
+ num_timescales = channels // 2
75
+ log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
76
+ num_timescales - 1
77
+ )
78
+ inv_timescales = min_timescale * torch.exp(
79
+ torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
80
+ )
81
+ scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
82
+ signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
83
+ signal = F.pad(signal, [0, 0, 0, channels % 2])
84
+ signal = signal.view(1, channels, length)
85
+ return signal
86
+
87
+
88
+ def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
89
+ b, channels, length = x.size()
90
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
91
+ return x + signal.to(dtype=x.dtype, device=x.device)
92
+
93
+
94
+ def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
95
+ b, channels, length = x.size()
96
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
97
+ return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
98
+
99
+
100
+ def subsequent_mask(length):
101
+ mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
102
+ return mask
103
+
104
+
105
+ @torch.jit.script
106
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
107
+ n_channels_int = n_channels[0]
108
+ in_act = input_a + input_b
109
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
110
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
111
+ acts = t_act * s_act
112
+ return acts
113
+
114
+
115
+ def convert_pad_shape(pad_shape):
116
+ l = pad_shape[::-1]
117
+ pad_shape = [item for sublist in l for item in sublist]
118
+ return pad_shape
119
+
120
+
121
+ def shift_1d(x):
122
+ x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
123
+ return x
124
+
125
+
126
+ def sequence_mask(length, max_length=None):
127
+ if max_length is None:
128
+ max_length = length.max()
129
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
130
+ return x.unsqueeze(0) < length.unsqueeze(1)
131
+
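+ # Example (added for clarity):
+ #     sequence_mask(torch.tensor([1, 3]))
+ #     # -> tensor([[ True, False, False],
+ #     #            [ True,  True,  True]])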
132
+
133
+ def generate_path(duration, mask):
134
+ """
135
+ duration: [b, 1, t_x]
136
+ mask: [b, 1, t_y, t_x]
137
+ """
138
+ device = duration.device
139
+
140
+ b, _, t_y, t_x = mask.shape
141
+ cum_duration = torch.cumsum(duration, -1)
142
+
143
+ cum_duration_flat = cum_duration.view(b * t_x)
144
+ path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
145
+ path = path.view(b, t_x, t_y)
146
+ path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
147
+ path = path.unsqueeze(1).transpose(2, 3) * mask
148
+ return path
149
+
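+ # generate_path builds a hard monotonic alignment: for each source step i
+ # the cumulative duration marks how many target frames are covered so far,
+ # and subtracting the shifted mask leaves exactly duration[i] ones for that
+ # step before the result is transposed into [b, 1, t_y, t_x].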
150
+
151
+ def clip_grad_value_(parameters, clip_value, norm_type=2):
152
+ if isinstance(parameters, torch.Tensor):
153
+ parameters = [parameters]
154
+ parameters = list(filter(lambda p: p.grad is not None, parameters))
155
+ norm_type = float(norm_type)
156
+ if clip_value is not None:
157
+ clip_value = float(clip_value)
158
+
159
+ total_norm = 0
160
+ for p in parameters:
161
+ param_norm = p.grad.data.norm(norm_type)
162
+ total_norm += param_norm.item() ** norm_type
163
+ if clip_value is not None:
164
+ p.grad.data.clamp_(min=-clip_value, max=clip_value)
165
+ total_norm = total_norm ** (1.0 / norm_type)
166
+ return total_norm
BanG-Dream-MyGO/lib/infer_pack/models.py ADDED
@@ -0,0 +1,1142 @@
+ import math
+ import numpy as np
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+ from torch.nn import Conv1d, ConvTranspose1d, Conv2d
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+ from lib.infer_pack import attentions, commons, modules
+ from lib.infer_pack.commons import init_weights, get_padding
15
+
16
+
17
+ class TextEncoder256(nn.Module):
18
+ def __init__(
19
+ self,
20
+ out_channels,
21
+ hidden_channels,
22
+ filter_channels,
23
+ n_heads,
24
+ n_layers,
25
+ kernel_size,
26
+ p_dropout,
27
+ f0=True,
28
+ ):
29
+ super().__init__()
30
+ self.out_channels = out_channels
31
+ self.hidden_channels = hidden_channels
32
+ self.filter_channels = filter_channels
33
+ self.n_heads = n_heads
34
+ self.n_layers = n_layers
35
+ self.kernel_size = kernel_size
36
+ self.p_dropout = p_dropout
37
+ self.emb_phone = nn.Linear(256, hidden_channels)
38
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
+ if f0:
40
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
+ self.encoder = attentions.Encoder(
42
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
+ )
44
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
+
46
+ def forward(self, phone, pitch, lengths):
47
+ if pitch is None:
48
+ x = self.emb_phone(phone)
49
+ else:
50
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
+ x = self.lrelu(x)
53
+ x = torch.transpose(x, 1, -1) # [b, h, t]
54
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
+ x.dtype
56
+ )
57
+ x = self.encoder(x * x_mask, x_mask)
58
+ stats = self.proj(x) * x_mask
59
+
60
+ m, logs = torch.split(stats, self.out_channels, dim=1)
61
+ return m, logs, x_mask
62
+
63
+
64
+ class TextEncoder768(nn.Module):
65
+ def __init__(
66
+ self,
67
+ out_channels,
68
+ hidden_channels,
69
+ filter_channels,
70
+ n_heads,
71
+ n_layers,
72
+ kernel_size,
73
+ p_dropout,
74
+ f0=True,
75
+ ):
76
+ super().__init__()
77
+ self.out_channels = out_channels
78
+ self.hidden_channels = hidden_channels
79
+ self.filter_channels = filter_channels
80
+ self.n_heads = n_heads
81
+ self.n_layers = n_layers
82
+ self.kernel_size = kernel_size
83
+ self.p_dropout = p_dropout
84
+ self.emb_phone = nn.Linear(768, hidden_channels)
85
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
+ if f0:
87
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
+ self.encoder = attentions.Encoder(
89
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
+ )
91
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
92
+
93
+ def forward(self, phone, pitch, lengths):
94
+ if pitch is None:
95
+ x = self.emb_phone(phone)
96
+ else:
97
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
+ x = self.lrelu(x)
100
+ x = torch.transpose(x, 1, -1) # [b, h, t]
101
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
+ x.dtype
103
+ )
104
+ x = self.encoder(x * x_mask, x_mask)
105
+ stats = self.proj(x) * x_mask
106
+
107
+ m, logs = torch.split(stats, self.out_channels, dim=1)
108
+ return m, logs, x_mask
109
+
110
+
111
+ class ResidualCouplingBlock(nn.Module):
112
+ def __init__(
113
+ self,
114
+ channels,
115
+ hidden_channels,
116
+ kernel_size,
117
+ dilation_rate,
118
+ n_layers,
119
+ n_flows=4,
120
+ gin_channels=0,
121
+ ):
122
+ super().__init__()
123
+ self.channels = channels
124
+ self.hidden_channels = hidden_channels
125
+ self.kernel_size = kernel_size
126
+ self.dilation_rate = dilation_rate
127
+ self.n_layers = n_layers
128
+ self.n_flows = n_flows
129
+ self.gin_channels = gin_channels
130
+
131
+ self.flows = nn.ModuleList()
132
+ for i in range(n_flows):
133
+ self.flows.append(
134
+ modules.ResidualCouplingLayer(
135
+ channels,
136
+ hidden_channels,
137
+ kernel_size,
138
+ dilation_rate,
139
+ n_layers,
140
+ gin_channels=gin_channels,
141
+ mean_only=True,
142
+ )
143
+ )
144
+ self.flows.append(modules.Flip())
145
+
146
+ def forward(self, x, x_mask, g=None, reverse=False):
147
+ if not reverse:
148
+ for flow in self.flows:
149
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
150
+ else:
151
+ for flow in reversed(self.flows):
152
+ x = flow(x, x_mask, g=g, reverse=reverse)
153
+ return x
154
+
155
+ def remove_weight_norm(self):
156
+ for i in range(self.n_flows):
157
+ self.flows[i * 2].remove_weight_norm()
158
+
159
+
160
+ class PosteriorEncoder(nn.Module):
161
+ def __init__(
162
+ self,
163
+ in_channels,
164
+ out_channels,
165
+ hidden_channels,
166
+ kernel_size,
167
+ dilation_rate,
168
+ n_layers,
169
+ gin_channels=0,
170
+ ):
171
+ super().__init__()
172
+ self.in_channels = in_channels
173
+ self.out_channels = out_channels
174
+ self.hidden_channels = hidden_channels
175
+ self.kernel_size = kernel_size
176
+ self.dilation_rate = dilation_rate
177
+ self.n_layers = n_layers
178
+ self.gin_channels = gin_channels
179
+
180
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
181
+ self.enc = modules.WN(
182
+ hidden_channels,
183
+ kernel_size,
184
+ dilation_rate,
185
+ n_layers,
186
+ gin_channels=gin_channels,
187
+ )
188
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
189
+
190
+ def forward(self, x, x_lengths, g=None):
191
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
192
+ x.dtype
193
+ )
194
+ x = self.pre(x) * x_mask
195
+ x = self.enc(x, x_mask, g=g)
196
+ stats = self.proj(x) * x_mask
197
+ m, logs = torch.split(stats, self.out_channels, dim=1)
198
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
199
+ return z, m, logs, x_mask
200
+
201
+ def remove_weight_norm(self):
202
+ self.enc.remove_weight_norm()
203
+
204
+
205
+ class Generator(torch.nn.Module):
206
+ def __init__(
207
+ self,
208
+ initial_channel,
209
+ resblock,
210
+ resblock_kernel_sizes,
211
+ resblock_dilation_sizes,
212
+ upsample_rates,
213
+ upsample_initial_channel,
214
+ upsample_kernel_sizes,
215
+ gin_channels=0,
216
+ ):
217
+ super(Generator, self).__init__()
218
+ self.num_kernels = len(resblock_kernel_sizes)
219
+ self.num_upsamples = len(upsample_rates)
220
+ self.conv_pre = Conv1d(
221
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
222
+ )
223
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
224
+
225
+ self.ups = nn.ModuleList()
226
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
227
+ self.ups.append(
228
+ weight_norm(
229
+ ConvTranspose1d(
230
+ upsample_initial_channel // (2**i),
231
+ upsample_initial_channel // (2 ** (i + 1)),
232
+ k,
233
+ u,
234
+ padding=(k - u) // 2,
235
+ )
236
+ )
237
+ )
238
+
239
+ self.resblocks = nn.ModuleList()
240
+ for i in range(len(self.ups)):
241
+ ch = upsample_initial_channel // (2 ** (i + 1))
242
+ for j, (k, d) in enumerate(
243
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
244
+ ):
245
+ self.resblocks.append(resblock(ch, k, d))
246
+
247
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
248
+ self.ups.apply(init_weights)
249
+
250
+ if gin_channels != 0:
251
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
252
+
253
+ def forward(self, x, g=None):
254
+ x = self.conv_pre(x)
255
+ if g is not None:
256
+ x = x + self.cond(g)
257
+
258
+ for i in range(self.num_upsamples):
259
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
260
+ x = self.ups[i](x)
261
+ xs = None
262
+ for j in range(self.num_kernels):
263
+ if xs is None:
264
+ xs = self.resblocks[i * self.num_kernels + j](x)
265
+ else:
266
+ xs += self.resblocks[i * self.num_kernels + j](x)
267
+ x = xs / self.num_kernels
268
+ x = F.leaky_relu(x)
269
+ x = self.conv_post(x)
270
+ x = torch.tanh(x)
271
+
272
+ return x
273
+
274
+ def remove_weight_norm(self):
275
+ for l in self.ups:
276
+ remove_weight_norm(l)
277
+ for l in self.resblocks:
278
+ l.remove_weight_norm()
279
+
280
+
281
+ class SineGen(torch.nn.Module):
282
+ """Definition of sine generator
283
+ SineGen(samp_rate, harmonic_num = 0,
284
+ sine_amp = 0.1, noise_std = 0.003,
285
+ voiced_threshold = 0,
286
+ flag_for_pulse=False)
287
+ samp_rate: sampling rate in Hz
288
+ harmonic_num: number of harmonic overtones (default 0)
289
+ sine_amp: amplitude of sine waveform (default 0.1)
290
+ noise_std: std of Gaussian noise (default 0.003)
291
+ voiced_threshold: F0 threshold for U/V classification (default 0)
292
+ flag_for_pulse: this SineGen is used inside PulseGen (default False)
293
+ Note: when flag_for_pulse is True, the first time step of a voiced
294
+ segment is always sin(np.pi) or cos(0)
295
+ """
296
+
297
+ def __init__(
298
+ self,
299
+ samp_rate,
300
+ harmonic_num=0,
301
+ sine_amp=0.1,
302
+ noise_std=0.003,
303
+ voiced_threshold=0,
304
+ flag_for_pulse=False,
305
+ ):
306
+ super(SineGen, self).__init__()
307
+ self.sine_amp = sine_amp
308
+ self.noise_std = noise_std
309
+ self.harmonic_num = harmonic_num
310
+ self.dim = self.harmonic_num + 1
311
+ self.sampling_rate = samp_rate
312
+ self.voiced_threshold = voiced_threshold
313
+
314
+ def _f02uv(self, f0):
315
+ # generate uv signal
316
+ uv = torch.ones_like(f0)
317
+ uv = uv * (f0 > self.voiced_threshold)
318
+ return uv
319
+
320
+ def forward(self, f0, upp):
321
+ """sine_tensor, uv = forward(f0)
322
+ input F0: tensor(batchsize=1, length, dim=1)
323
+ f0 for unvoiced steps should be 0
324
+ output sine_tensor: tensor(batchsize=1, length, dim)
325
+ output uv: tensor(batchsize=1, length, 1)
326
+ """
327
+ with torch.no_grad():
328
+ f0 = f0[:, None].transpose(1, 2)
329
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
330
+ # fundamental component
331
+ f0_buf[:, :, 0] = f0[:, :, 0]
332
+ for idx in np.arange(self.harmonic_num):
333
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
334
+ idx + 2
335
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
336
+ rad_values = (f0_buf / self.sampling_rate) % 1 # the % 1 means the harmonic products can no longer be optimized in post-processing
337
+ rand_ini = torch.rand(
338
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
339
+ )
340
+ rand_ini[:, 0] = 0
341
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
342
+ tmp_over_one = torch.cumsum(rad_values, 1) # a % 1 here would make the cumsum below impossible to optimize
343
+ tmp_over_one *= upp
344
+ tmp_over_one = F.interpolate(
345
+ tmp_over_one.transpose(2, 1),
346
+ scale_factor=upp,
347
+ mode="linear",
348
+ align_corners=True,
349
+ ).transpose(2, 1)
350
+ rad_values = F.interpolate(
351
+ rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
352
+ ).transpose(
353
+ 2, 1
354
+ ) #######
355
+ tmp_over_one %= 1
356
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
357
+ cumsum_shift = torch.zeros_like(rad_values)
358
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
359
+ sine_waves = torch.sin(
360
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
361
+ )
362
+ sine_waves = sine_waves * self.sine_amp
363
+ uv = self._f02uv(f0)
364
+ uv = F.interpolate(
365
+ uv.transpose(2, 1), scale_factor=upp, mode="nearest"
366
+ ).transpose(2, 1)
367
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
368
+ noise = noise_amp * torch.randn_like(sine_waves)
369
+ sine_waves = sine_waves * uv + noise
370
+ return sine_waves, uv, noise
371
+
372
+
373
+ class SourceModuleHnNSF(torch.nn.Module):
374
+ """SourceModule for hn-nsf
375
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
376
+ add_noise_std=0.003, voiced_threshod=0)
377
+ sampling_rate: sampling_rate in Hz
378
+ harmonic_num: number of harmonic above F0 (default: 0)
379
+ sine_amp: amplitude of sine source signal (default: 0.1)
380
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
381
+ note that amplitude of noise in unvoiced is decided
382
+ by sine_amp
383
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
384
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
385
+ F0_sampled (batchsize, length, 1)
386
+ Sine_source (batchsize, length, 1)
387
+ noise_source (batchsize, length, 1)
388
+ uv (batchsize, length, 1)
389
+ """
390
+
391
+ def __init__(
392
+ self,
393
+ sampling_rate,
394
+ harmonic_num=0,
395
+ sine_amp=0.1,
396
+ add_noise_std=0.003,
397
+ voiced_threshod=0,
398
+ is_half=True,
399
+ ):
400
+ super(SourceModuleHnNSF, self).__init__()
401
+
402
+ self.sine_amp = sine_amp
403
+ self.noise_std = add_noise_std
404
+ self.is_half = is_half
405
+ # to produce sine waveforms
406
+ self.l_sin_gen = SineGen(
407
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
408
+ )
409
+
410
+ # to merge source harmonics into a single excitation
411
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
412
+ self.l_tanh = torch.nn.Tanh()
413
+
414
+ def forward(self, x, upp=None):
415
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
416
+ if self.is_half:
417
+ sine_wavs = sine_wavs.half()
418
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
419
+ return sine_merge, None, None # noise, uv
420
+
421
+
422
+ class GeneratorNSF(torch.nn.Module):
423
+ def __init__(
424
+ self,
425
+ initial_channel,
426
+ resblock,
427
+ resblock_kernel_sizes,
428
+ resblock_dilation_sizes,
429
+ upsample_rates,
430
+ upsample_initial_channel,
431
+ upsample_kernel_sizes,
432
+ gin_channels,
433
+ sr,
434
+ is_half=False,
435
+ ):
436
+ super(GeneratorNSF, self).__init__()
437
+ self.num_kernels = len(resblock_kernel_sizes)
438
+ self.num_upsamples = len(upsample_rates)
439
+
440
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
441
+ self.m_source = SourceModuleHnNSF(
442
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
443
+ )
444
+ self.noise_convs = nn.ModuleList()
445
+ self.conv_pre = Conv1d(
446
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
447
+ )
448
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
449
+
450
+ self.ups = nn.ModuleList()
451
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
452
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
453
+ self.ups.append(
454
+ weight_norm(
455
+ ConvTranspose1d(
456
+ upsample_initial_channel // (2**i),
457
+ upsample_initial_channel // (2 ** (i + 1)),
458
+ k,
459
+ u,
460
+ padding=(k - u) // 2,
461
+ )
462
+ )
463
+ )
464
+ if i + 1 < len(upsample_rates):
465
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
466
+ self.noise_convs.append(
467
+ Conv1d(
468
+ 1,
469
+ c_cur,
470
+ kernel_size=stride_f0 * 2,
471
+ stride=stride_f0,
472
+ padding=stride_f0 // 2,
473
+ )
474
+ )
475
+ else:
476
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
477
+
478
+ self.resblocks = nn.ModuleList()
479
+ for i in range(len(self.ups)):
480
+ ch = upsample_initial_channel // (2 ** (i + 1))
481
+ for j, (k, d) in enumerate(
482
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
483
+ ):
484
+ self.resblocks.append(resblock(ch, k, d))
485
+
486
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
487
+ self.ups.apply(init_weights)
488
+
489
+ if gin_channels != 0:
490
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
491
+
492
+ self.upp = np.prod(upsample_rates)
493
+
494
+ def forward(self, x, f0, g=None):
495
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
496
+ har_source = har_source.transpose(1, 2)
497
+ x = self.conv_pre(x)
498
+ if g is not None:
499
+ x = x + self.cond(g)
500
+
501
+ for i in range(self.num_upsamples):
502
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
503
+ x = self.ups[i](x)
504
+ x_source = self.noise_convs[i](har_source)
505
+ x = x + x_source
506
+ xs = None
507
+ for j in range(self.num_kernels):
508
+ if xs is None:
509
+ xs = self.resblocks[i * self.num_kernels + j](x)
510
+ else:
511
+ xs += self.resblocks[i * self.num_kernels + j](x)
512
+ x = xs / self.num_kernels
513
+ x = F.leaky_relu(x)
514
+ x = self.conv_post(x)
515
+ x = torch.tanh(x)
516
+ return x
517
+
518
+ def remove_weight_norm(self):
519
+ for l in self.ups:
520
+ remove_weight_norm(l)
521
+ for l in self.resblocks:
522
+ l.remove_weight_norm()
523
+
524
+
525
+ sr2sr = {
526
+ "32k": 32000,
527
+ "40k": 40000,
528
+ "48k": 48000,
529
+ }
530
+
531
+
532
+ class SynthesizerTrnMs256NSFsid(nn.Module):
533
+ def __init__(
534
+ self,
535
+ spec_channels,
536
+ segment_size,
537
+ inter_channels,
538
+ hidden_channels,
539
+ filter_channels,
540
+ n_heads,
541
+ n_layers,
542
+ kernel_size,
543
+ p_dropout,
544
+ resblock,
545
+ resblock_kernel_sizes,
546
+ resblock_dilation_sizes,
547
+ upsample_rates,
548
+ upsample_initial_channel,
549
+ upsample_kernel_sizes,
550
+ spk_embed_dim,
551
+ gin_channels,
552
+ sr,
553
+ **kwargs
554
+ ):
555
+ super().__init__()
556
+ if isinstance(sr, str):
557
+ sr = sr2sr[sr]
558
+ self.spec_channels = spec_channels
559
+ self.inter_channels = inter_channels
560
+ self.hidden_channels = hidden_channels
561
+ self.filter_channels = filter_channels
562
+ self.n_heads = n_heads
563
+ self.n_layers = n_layers
564
+ self.kernel_size = kernel_size
565
+ self.p_dropout = p_dropout
566
+ self.resblock = resblock
567
+ self.resblock_kernel_sizes = resblock_kernel_sizes
568
+ self.resblock_dilation_sizes = resblock_dilation_sizes
569
+ self.upsample_rates = upsample_rates
570
+ self.upsample_initial_channel = upsample_initial_channel
571
+ self.upsample_kernel_sizes = upsample_kernel_sizes
572
+ self.segment_size = segment_size
573
+ self.gin_channels = gin_channels
574
+ # self.hop_length = hop_length#
575
+ self.spk_embed_dim = spk_embed_dim
576
+ self.enc_p = TextEncoder256(
577
+ inter_channels,
578
+ hidden_channels,
579
+ filter_channels,
580
+ n_heads,
581
+ n_layers,
582
+ kernel_size,
583
+ p_dropout,
584
+ )
585
+ self.dec = GeneratorNSF(
586
+ inter_channels,
587
+ resblock,
588
+ resblock_kernel_sizes,
589
+ resblock_dilation_sizes,
590
+ upsample_rates,
591
+ upsample_initial_channel,
592
+ upsample_kernel_sizes,
593
+ gin_channels=gin_channels,
594
+ sr=sr,
595
+ is_half=kwargs["is_half"],
596
+ )
597
+ self.enc_q = PosteriorEncoder(
598
+ spec_channels,
599
+ inter_channels,
600
+ hidden_channels,
601
+ 5,
602
+ 1,
603
+ 16,
604
+ gin_channels=gin_channels,
605
+ )
606
+ self.flow = ResidualCouplingBlock(
607
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
608
+ )
609
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
610
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
611
+
612
+ def remove_weight_norm(self):
613
+ self.dec.remove_weight_norm()
614
+ self.flow.remove_weight_norm()
615
+ self.enc_q.remove_weight_norm()
616
+
617
+ def forward(
618
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
619
+ ): # ds is the speaker id, shape [bs, 1]
620
+ # print(1,pitch.shape)#[bs,t]
621
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is t, broadcast over time
622
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
623
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
624
+ z_p = self.flow(z, y_mask, g=g)
625
+ z_slice, ids_slice = commons.rand_slice_segments(
626
+ z, y_lengths, self.segment_size
627
+ )
628
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
629
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
630
+ # print(-2,pitchf.shape,z_slice.shape)
631
+ o = self.dec(z_slice, pitchf, g=g)
632
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
633
+
634
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
635
+ g = self.emb_g(sid).unsqueeze(-1)
636
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
637
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
638
+ if rate:
639
+ head = int(z_p.shape[2] * rate)
640
+ z_p = z_p[:, :, -head:]
641
+ x_mask = x_mask[:, :, -head:]
642
+ nsff0 = nsff0[:, -head:]
643
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
644
+ o = self.dec(z * x_mask, nsff0, g=g)
645
+ return o, x_mask, (z, z_p, m_p, logs_p)
646
+
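+ # Inference sketch (hypothetical names, mirroring how vc_infer_pipeline.py
+ # drives the model): the caller passes content features, their lengths, the
+ # coarse pitch, the NSF f0 curve, and a speaker id:
+ #     audio, _, _ = net_g.infer(feats, feat_lengths, pitch, pitchf, sid)
+ # `rate`, when set, keeps only the trailing fraction of frames for faster
+ # streaming-style inference.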
647
+
648
+ class SynthesizerTrnMs768NSFsid(nn.Module):
649
+ def __init__(
650
+ self,
651
+ spec_channels,
652
+ segment_size,
653
+ inter_channels,
654
+ hidden_channels,
655
+ filter_channels,
656
+ n_heads,
657
+ n_layers,
658
+ kernel_size,
659
+ p_dropout,
660
+ resblock,
661
+ resblock_kernel_sizes,
662
+ resblock_dilation_sizes,
663
+ upsample_rates,
664
+ upsample_initial_channel,
665
+ upsample_kernel_sizes,
666
+ spk_embed_dim,
667
+ gin_channels,
668
+ sr,
669
+ **kwargs
670
+ ):
671
+ super().__init__()
672
+ if isinstance(sr, str):
673
+ sr = sr2sr[sr]
674
+ self.spec_channels = spec_channels
675
+ self.inter_channels = inter_channels
676
+ self.hidden_channels = hidden_channels
677
+ self.filter_channels = filter_channels
678
+ self.n_heads = n_heads
679
+ self.n_layers = n_layers
680
+ self.kernel_size = kernel_size
681
+ self.p_dropout = p_dropout
682
+ self.resblock = resblock
683
+ self.resblock_kernel_sizes = resblock_kernel_sizes
684
+ self.resblock_dilation_sizes = resblock_dilation_sizes
685
+ self.upsample_rates = upsample_rates
686
+ self.upsample_initial_channel = upsample_initial_channel
687
+ self.upsample_kernel_sizes = upsample_kernel_sizes
688
+ self.segment_size = segment_size
689
+ self.gin_channels = gin_channels
690
+ # self.hop_length = hop_length#
691
+ self.spk_embed_dim = spk_embed_dim
692
+ self.enc_p = TextEncoder768(
693
+ inter_channels,
694
+ hidden_channels,
695
+ filter_channels,
696
+ n_heads,
697
+ n_layers,
698
+ kernel_size,
699
+ p_dropout,
700
+ )
701
+ self.dec = GeneratorNSF(
702
+ inter_channels,
703
+ resblock,
704
+ resblock_kernel_sizes,
705
+ resblock_dilation_sizes,
706
+ upsample_rates,
707
+ upsample_initial_channel,
708
+ upsample_kernel_sizes,
709
+ gin_channels=gin_channels,
710
+ sr=sr,
711
+ is_half=kwargs["is_half"],
712
+ )
713
+ self.enc_q = PosteriorEncoder(
714
+ spec_channels,
715
+ inter_channels,
716
+ hidden_channels,
717
+ 5,
718
+ 1,
719
+ 16,
720
+ gin_channels=gin_channels,
721
+ )
722
+ self.flow = ResidualCouplingBlock(
723
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
724
+ )
725
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
726
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
727
+
728
+ def remove_weight_norm(self):
729
+ self.dec.remove_weight_norm()
730
+ self.flow.remove_weight_norm()
731
+ self.enc_q.remove_weight_norm()
732
+
733
+ def forward(
734
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
735
+ ): # ds is the speaker id, shape [bs, 1]
736
+ # print(1,pitch.shape)#[bs,t]
737
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is t, broadcast over time
738
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
739
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
740
+ z_p = self.flow(z, y_mask, g=g)
741
+ z_slice, ids_slice = commons.rand_slice_segments(
742
+ z, y_lengths, self.segment_size
743
+ )
744
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
745
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
746
+ # print(-2,pitchf.shape,z_slice.shape)
747
+ o = self.dec(z_slice, pitchf, g=g)
748
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
749
+
750
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
751
+ g = self.emb_g(sid).unsqueeze(-1)
752
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
753
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
754
+ if rate:
755
+ head = int(z_p.shape[2] * rate)
756
+ z_p = z_p[:, :, -head:]
757
+ x_mask = x_mask[:, :, -head:]
758
+ nsff0 = nsff0[:, -head:]
759
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
760
+ o = self.dec(z * x_mask, nsff0, g=g)
761
+ return o, x_mask, (z, z_p, m_p, logs_p)
762
+
763
+
764
+ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
765
+ def __init__(
766
+ self,
767
+ spec_channels,
768
+ segment_size,
769
+ inter_channels,
770
+ hidden_channels,
771
+ filter_channels,
772
+ n_heads,
773
+ n_layers,
774
+ kernel_size,
775
+ p_dropout,
776
+ resblock,
777
+ resblock_kernel_sizes,
778
+ resblock_dilation_sizes,
779
+ upsample_rates,
780
+ upsample_initial_channel,
781
+ upsample_kernel_sizes,
782
+ spk_embed_dim,
783
+ gin_channels,
784
+ sr=None,
785
+ **kwargs
786
+ ):
787
+ super().__init__()
788
+ self.spec_channels = spec_channels
789
+ self.inter_channels = inter_channels
790
+ self.hidden_channels = hidden_channels
791
+ self.filter_channels = filter_channels
792
+ self.n_heads = n_heads
793
+ self.n_layers = n_layers
794
+ self.kernel_size = kernel_size
795
+ self.p_dropout = p_dropout
796
+ self.resblock = resblock
797
+ self.resblock_kernel_sizes = resblock_kernel_sizes
798
+ self.resblock_dilation_sizes = resblock_dilation_sizes
799
+ self.upsample_rates = upsample_rates
800
+ self.upsample_initial_channel = upsample_initial_channel
801
+ self.upsample_kernel_sizes = upsample_kernel_sizes
802
+ self.segment_size = segment_size
803
+ self.gin_channels = gin_channels
804
+ # self.hop_length = hop_length#
805
+ self.spk_embed_dim = spk_embed_dim
806
+ self.enc_p = TextEncoder256(
807
+ inter_channels,
808
+ hidden_channels,
809
+ filter_channels,
810
+ n_heads,
811
+ n_layers,
812
+ kernel_size,
813
+ p_dropout,
814
+ f0=False,
815
+ )
816
+ self.dec = Generator(
817
+ inter_channels,
818
+ resblock,
819
+ resblock_kernel_sizes,
820
+ resblock_dilation_sizes,
821
+ upsample_rates,
822
+ upsample_initial_channel,
823
+ upsample_kernel_sizes,
824
+ gin_channels=gin_channels,
825
+ )
826
+ self.enc_q = PosteriorEncoder(
827
+ spec_channels,
828
+ inter_channels,
829
+ hidden_channels,
830
+ 5,
831
+ 1,
832
+ 16,
833
+ gin_channels=gin_channels,
834
+ )
835
+ self.flow = ResidualCouplingBlock(
836
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
837
+ )
838
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
839
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
840
+
841
+ def remove_weight_norm(self):
842
+ self.dec.remove_weight_norm()
843
+ self.flow.remove_weight_norm()
844
+ self.enc_q.remove_weight_norm()
845
+
846
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, shape [bs, 1]
847
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is t, broadcast over time
848
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
849
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
850
+ z_p = self.flow(z, y_mask, g=g)
851
+ z_slice, ids_slice = commons.rand_slice_segments(
852
+ z, y_lengths, self.segment_size
853
+ )
854
+ o = self.dec(z_slice, g=g)
855
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
856
+
857
+ def infer(self, phone, phone_lengths, sid, rate=None):
858
+ g = self.emb_g(sid).unsqueeze(-1)
859
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
860
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
861
+ if rate:
862
+ head = int(z_p.shape[2] * rate)
863
+ z_p = z_p[:, :, -head:]
864
+ x_mask = x_mask[:, :, -head:]
865
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
866
+ o = self.dec(z * x_mask, g=g)
867
+ return o, x_mask, (z, z_p, m_p, logs_p)
868
+
869
+
870
+ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
871
+ def __init__(
872
+ self,
873
+ spec_channels,
874
+ segment_size,
875
+ inter_channels,
876
+ hidden_channels,
877
+ filter_channels,
878
+ n_heads,
879
+ n_layers,
880
+ kernel_size,
881
+ p_dropout,
882
+ resblock,
883
+ resblock_kernel_sizes,
884
+ resblock_dilation_sizes,
885
+ upsample_rates,
886
+ upsample_initial_channel,
887
+ upsample_kernel_sizes,
888
+ spk_embed_dim,
889
+ gin_channels,
890
+ sr=None,
891
+ **kwargs
892
+ ):
893
+ super().__init__()
894
+ self.spec_channels = spec_channels
895
+ self.inter_channels = inter_channels
896
+ self.hidden_channels = hidden_channels
897
+ self.filter_channels = filter_channels
898
+ self.n_heads = n_heads
899
+ self.n_layers = n_layers
900
+ self.kernel_size = kernel_size
901
+ self.p_dropout = p_dropout
902
+ self.resblock = resblock
903
+ self.resblock_kernel_sizes = resblock_kernel_sizes
904
+ self.resblock_dilation_sizes = resblock_dilation_sizes
905
+ self.upsample_rates = upsample_rates
906
+ self.upsample_initial_channel = upsample_initial_channel
907
+ self.upsample_kernel_sizes = upsample_kernel_sizes
908
+ self.segment_size = segment_size
909
+ self.gin_channels = gin_channels
910
+ # self.hop_length = hop_length#
911
+ self.spk_embed_dim = spk_embed_dim
912
+ self.enc_p = TextEncoder768(
913
+ inter_channels,
914
+ hidden_channels,
915
+ filter_channels,
916
+ n_heads,
917
+ n_layers,
918
+ kernel_size,
919
+ p_dropout,
920
+ f0=False,
921
+ )
922
+ self.dec = Generator(
923
+ inter_channels,
924
+ resblock,
925
+ resblock_kernel_sizes,
926
+ resblock_dilation_sizes,
927
+ upsample_rates,
928
+ upsample_initial_channel,
929
+ upsample_kernel_sizes,
930
+ gin_channels=gin_channels,
931
+ )
932
+ self.enc_q = PosteriorEncoder(
933
+ spec_channels,
934
+ inter_channels,
935
+ hidden_channels,
936
+ 5,
937
+ 1,
938
+ 16,
939
+ gin_channels=gin_channels,
940
+ )
941
+ self.flow = ResidualCouplingBlock(
942
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
943
+ )
944
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
945
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
946
+
947
+ def remove_weight_norm(self):
948
+ self.dec.remove_weight_norm()
949
+ self.flow.remove_weight_norm()
950
+ self.enc_q.remove_weight_norm()
951
+
952
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, shape [bs, 1]
953
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is t, broadcast over time
954
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
955
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
956
+ z_p = self.flow(z, y_mask, g=g)
957
+ z_slice, ids_slice = commons.rand_slice_segments(
958
+ z, y_lengths, self.segment_size
959
+ )
960
+ o = self.dec(z_slice, g=g)
961
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
962
+
963
+ def infer(self, phone, phone_lengths, sid, rate=None):
964
+ g = self.emb_g(sid).unsqueeze(-1)
965
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
966
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
967
+ if rate:
968
+ head = int(z_p.shape[2] * rate)
969
+ z_p = z_p[:, :, -head:]
970
+ x_mask = x_mask[:, :, -head:]
971
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
972
+ o = self.dec(z * x_mask, g=g)
973
+ return o, x_mask, (z, z_p, m_p, logs_p)
974
+
975
+
976
+ class MultiPeriodDiscriminator(torch.nn.Module):
977
+ def __init__(self, use_spectral_norm=False):
978
+ super(MultiPeriodDiscriminator, self).__init__()
979
+ periods = [2, 3, 5, 7, 11, 17]
980
+ # periods = [3, 5, 7, 11, 17, 23, 37]
981
+
982
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
983
+ discs = discs + [
984
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
985
+ ]
986
+ self.discriminators = nn.ModuleList(discs)
987
+
988
+ def forward(self, y, y_hat):
989
+ y_d_rs = [] #
990
+ y_d_gs = []
991
+ fmap_rs = []
992
+ fmap_gs = []
993
+ for i, d in enumerate(self.discriminators):
994
+ y_d_r, fmap_r = d(y)
995
+ y_d_g, fmap_g = d(y_hat)
996
+ # for j in range(len(fmap_r)):
997
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
998
+ y_d_rs.append(y_d_r)
999
+ y_d_gs.append(y_d_g)
1000
+ fmap_rs.append(fmap_r)
1001
+ fmap_gs.append(fmap_g)
1002
+
1003
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1004
+
1005
+
1006
+ class MultiPeriodDiscriminatorV2(torch.nn.Module):
1007
+ def __init__(self, use_spectral_norm=False):
1008
+ super(MultiPeriodDiscriminatorV2, self).__init__()
1009
+ # periods = [2, 3, 5, 7, 11, 17]
1010
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
1011
+
1012
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
1013
+ discs = discs + [
1014
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
1015
+ ]
1016
+ self.discriminators = nn.ModuleList(discs)
1017
+
1018
+ def forward(self, y, y_hat):
1019
+ y_d_rs = [] #
1020
+ y_d_gs = []
1021
+ fmap_rs = []
1022
+ fmap_gs = []
1023
+ for i, d in enumerate(self.discriminators):
1024
+ y_d_r, fmap_r = d(y)
1025
+ y_d_g, fmap_g = d(y_hat)
1026
+ # for j in range(len(fmap_r)):
1027
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
1028
+ y_d_rs.append(y_d_r)
1029
+ y_d_gs.append(y_d_g)
1030
+ fmap_rs.append(fmap_r)
1031
+ fmap_gs.append(fmap_g)
1032
+
1033
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1034
+
1035
+
1036
+ class DiscriminatorS(torch.nn.Module):
1037
+ def __init__(self, use_spectral_norm=False):
1038
+ super(DiscriminatorS, self).__init__()
1039
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
1040
+ self.convs = nn.ModuleList(
1041
+ [
1042
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
1043
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
1044
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
1045
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
1046
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
1047
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
1048
+ ]
1049
+ )
1050
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
1051
+
1052
+ def forward(self, x):
1053
+ fmap = []
1054
+
1055
+ for l in self.convs:
1056
+ x = l(x)
1057
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
1058
+ fmap.append(x)
1059
+ x = self.conv_post(x)
1060
+ fmap.append(x)
1061
+ x = torch.flatten(x, 1, -1)
1062
+
1063
+ return x, fmap
1064
+
1065
+
1066
+ class DiscriminatorP(torch.nn.Module):
1067
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
1068
+ super(DiscriminatorP, self).__init__()
1069
+ self.period = period
1070
+ self.use_spectral_norm = use_spectral_norm
1071
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
1072
+ self.convs = nn.ModuleList(
1073
+ [
1074
+ norm_f(
1075
+ Conv2d(
1076
+ 1,
1077
+ 32,
1078
+ (kernel_size, 1),
1079
+ (stride, 1),
1080
+ padding=(get_padding(kernel_size, 1), 0),
1081
+ )
1082
+ ),
1083
+ norm_f(
1084
+ Conv2d(
1085
+ 32,
1086
+ 128,
1087
+ (kernel_size, 1),
1088
+ (stride, 1),
1089
+ padding=(get_padding(kernel_size, 1), 0),
1090
+ )
1091
+ ),
1092
+ norm_f(
1093
+ Conv2d(
1094
+ 128,
1095
+ 512,
1096
+ (kernel_size, 1),
1097
+ (stride, 1),
1098
+ padding=(get_padding(kernel_size, 1), 0),
1099
+ )
1100
+ ),
1101
+ norm_f(
1102
+ Conv2d(
1103
+ 512,
1104
+ 1024,
1105
+ (kernel_size, 1),
1106
+ (stride, 1),
1107
+ padding=(get_padding(kernel_size, 1), 0),
1108
+ )
1109
+ ),
1110
+ norm_f(
1111
+ Conv2d(
1112
+ 1024,
1113
+ 1024,
1114
+ (kernel_size, 1),
1115
+ 1,
1116
+ padding=(get_padding(kernel_size, 1), 0),
1117
+ )
1118
+ ),
1119
+ ]
1120
+ )
1121
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
1122
+
1123
+ def forward(self, x):
1124
+ fmap = []
1125
+
1126
+ # 1d to 2d
1127
+ b, c, t = x.shape
1128
+ if t % self.period != 0: # pad first
1129
+ n_pad = self.period - (t % self.period)
1130
+ x = F.pad(x, (0, n_pad), "reflect")
1131
+ t = t + n_pad
1132
+ x = x.view(b, c, t // self.period, self.period)
1133
+
1134
+ for l in self.convs:
1135
+ x = l(x)
1136
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
1137
+ fmap.append(x)
1138
+ x = self.conv_post(x)
1139
+ fmap.append(x)
1140
+ x = torch.flatten(x, 1, -1)
1141
+
1142
+ return x, fmap
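+ # DiscriminatorP folds the 1-D waveform into a [B, C, T/period, period]
+ # grid (padding T to a multiple of the period first), so its 2-D
+ # convolutions with (kernel_size, 1) kernels compare samples exactly one
+ # period apart; MultiPeriodDiscriminator runs one copy per prime period
+ # plus DiscriminatorS on the raw signal.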
BanG-Dream-MyGO/lib/infer_pack/models_dml.py ADDED
@@ -0,0 +1,1124 @@
+ import math
+ import numpy as np
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+ from torch.nn import Conv1d, ConvTranspose1d, Conv2d
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+ from lib.infer_pack import attentions, commons, modules
+ from lib.infer_pack.commons import init_weights, get_padding
15
+
16
+
17
+ class TextEncoder256(nn.Module):
18
+ def __init__(
19
+ self,
20
+ out_channels,
21
+ hidden_channels,
22
+ filter_channels,
23
+ n_heads,
24
+ n_layers,
25
+ kernel_size,
26
+ p_dropout,
27
+ f0=True,
28
+ ):
29
+ super().__init__()
30
+ self.out_channels = out_channels
31
+ self.hidden_channels = hidden_channels
32
+ self.filter_channels = filter_channels
33
+ self.n_heads = n_heads
34
+ self.n_layers = n_layers
35
+ self.kernel_size = kernel_size
36
+ self.p_dropout = p_dropout
37
+ self.emb_phone = nn.Linear(256, hidden_channels)
38
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
+ if f0 == True:
40
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
+ self.encoder = attentions.Encoder(
42
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
+ )
44
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
+
46
+ def forward(self, phone, pitch, lengths):
47
+ if pitch == None:
48
+ x = self.emb_phone(phone)
49
+ else:
50
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
+ x = self.lrelu(x)
53
+ x = torch.transpose(x, 1, -1) # [b, h, t]
54
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
+ x.dtype
56
+ )
57
+ x = self.encoder(x * x_mask, x_mask)
58
+ stats = self.proj(x) * x_mask
59
+
60
+ m, logs = torch.split(stats, self.out_channels, dim=1)
61
+ return m, logs, x_mask
62
+
63
+
64
+ class TextEncoder768(nn.Module):
65
+ def __init__(
66
+ self,
67
+ out_channels,
68
+ hidden_channels,
69
+ filter_channels,
70
+ n_heads,
71
+ n_layers,
72
+ kernel_size,
73
+ p_dropout,
74
+ f0=True,
75
+ ):
76
+ super().__init__()
77
+ self.out_channels = out_channels
78
+ self.hidden_channels = hidden_channels
79
+ self.filter_channels = filter_channels
80
+ self.n_heads = n_heads
81
+ self.n_layers = n_layers
82
+ self.kernel_size = kernel_size
83
+ self.p_dropout = p_dropout
84
+ self.emb_phone = nn.Linear(768, hidden_channels)
85
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
+ if f0 == True:
87
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
+ self.encoder = attentions.Encoder(
89
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
+ )
91
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
92
+
93
+ def forward(self, phone, pitch, lengths):
94
+ if pitch == None:
95
+ x = self.emb_phone(phone)
96
+ else:
97
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
+ x = self.lrelu(x)
100
+ x = torch.transpose(x, 1, -1) # [b, h, t]
101
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
+ x.dtype
103
+ )
104
+ x = self.encoder(x * x_mask, x_mask)
105
+ stats = self.proj(x) * x_mask
106
+
107
+ m, logs = torch.split(stats, self.out_channels, dim=1)
108
+ return m, logs, x_mask
109
+
110
+
111
+ class ResidualCouplingBlock(nn.Module):
112
+ def __init__(
113
+ self,
114
+ channels,
115
+ hidden_channels,
116
+ kernel_size,
117
+ dilation_rate,
118
+ n_layers,
119
+ n_flows=4,
120
+ gin_channels=0,
121
+ ):
122
+ super().__init__()
123
+ self.channels = channels
124
+ self.hidden_channels = hidden_channels
125
+ self.kernel_size = kernel_size
126
+ self.dilation_rate = dilation_rate
127
+ self.n_layers = n_layers
128
+ self.n_flows = n_flows
129
+ self.gin_channels = gin_channels
130
+
131
+ self.flows = nn.ModuleList()
132
+ for i in range(n_flows):
133
+ self.flows.append(
134
+ modules.ResidualCouplingLayer(
135
+ channels,
136
+ hidden_channels,
137
+ kernel_size,
138
+ dilation_rate,
139
+ n_layers,
140
+ gin_channels=gin_channels,
141
+ mean_only=True,
142
+ )
143
+ )
144
+ self.flows.append(modules.Flip())
145
+
146
+ def forward(self, x, x_mask, g=None, reverse=False):
147
+ if not reverse:
148
+ for flow in self.flows:
149
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
150
+ else:
151
+ for flow in reversed(self.flows):
152
+ x = flow(x, x_mask, g=g, reverse=reverse)
153
+ return x
154
+
155
+ def remove_weight_norm(self):
156
+ for i in range(self.n_flows):
157
+ self.flows[i * 2].remove_weight_norm()
158
+
159
+
160
+ class PosteriorEncoder(nn.Module):
161
+ def __init__(
162
+ self,
163
+ in_channels,
164
+ out_channels,
165
+ hidden_channels,
166
+ kernel_size,
167
+ dilation_rate,
168
+ n_layers,
169
+ gin_channels=0,
170
+ ):
171
+ super().__init__()
172
+ self.in_channels = in_channels
173
+ self.out_channels = out_channels
174
+ self.hidden_channels = hidden_channels
175
+ self.kernel_size = kernel_size
176
+ self.dilation_rate = dilation_rate
177
+ self.n_layers = n_layers
178
+ self.gin_channels = gin_channels
179
+
180
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
181
+ self.enc = modules.WN(
182
+ hidden_channels,
183
+ kernel_size,
184
+ dilation_rate,
185
+ n_layers,
186
+ gin_channels=gin_channels,
187
+ )
188
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
189
+
190
+ def forward(self, x, x_lengths, g=None):
191
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
192
+ x.dtype
193
+ )
194
+ x = self.pre(x) * x_mask
195
+ x = self.enc(x, x_mask, g=g)
196
+ stats = self.proj(x) * x_mask
197
+ m, logs = torch.split(stats, self.out_channels, dim=1)
198
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
199
+ return z, m, logs, x_mask
200
+
201
+ def remove_weight_norm(self):
202
+ self.enc.remove_weight_norm()
203
+
204
+
205
+ class Generator(torch.nn.Module):
206
+ def __init__(
207
+ self,
208
+ initial_channel,
209
+ resblock,
210
+ resblock_kernel_sizes,
211
+ resblock_dilation_sizes,
212
+ upsample_rates,
213
+ upsample_initial_channel,
214
+ upsample_kernel_sizes,
215
+ gin_channels=0,
216
+ ):
217
+ super(Generator, self).__init__()
218
+ self.num_kernels = len(resblock_kernel_sizes)
219
+ self.num_upsamples = len(upsample_rates)
220
+ self.conv_pre = Conv1d(
221
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
222
+ )
223
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
224
+
225
+ self.ups = nn.ModuleList()
226
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
227
+ self.ups.append(
228
+ weight_norm(
229
+ ConvTranspose1d(
230
+ upsample_initial_channel // (2**i),
231
+ upsample_initial_channel // (2 ** (i + 1)),
232
+ k,
233
+ u,
234
+ padding=(k - u) // 2,
235
+ )
236
+ )
237
+ )
238
+
239
+ self.resblocks = nn.ModuleList()
240
+ for i in range(len(self.ups)):
241
+ ch = upsample_initial_channel // (2 ** (i + 1))
242
+ for j, (k, d) in enumerate(
243
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
244
+ ):
245
+ self.resblocks.append(resblock(ch, k, d))
246
+
247
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
248
+ self.ups.apply(init_weights)
249
+
250
+ if gin_channels != 0:
251
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
252
+
253
+ def forward(self, x, g=None):
254
+ x = self.conv_pre(x)
255
+ if g is not None:
256
+ x = x + self.cond(g)
257
+
258
+ for i in range(self.num_upsamples):
259
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
260
+ x = self.ups[i](x)
261
+ xs = None
262
+ for j in range(self.num_kernels):
263
+ if xs is None:
264
+ xs = self.resblocks[i * self.num_kernels + j](x)
265
+ else:
266
+ xs += self.resblocks[i * self.num_kernels + j](x)
267
+ x = xs / self.num_kernels
268
+ x = F.leaky_relu(x)
269
+ x = self.conv_post(x)
270
+ x = torch.tanh(x)
271
+
272
+ return x
273
+
274
+ def remove_weight_norm(self):
275
+ for l in self.ups:
276
+ remove_weight_norm(l)
277
+ for l in self.resblocks:
278
+ l.remove_weight_norm()
279
+
280
+
281
+ class SineGen(torch.nn.Module):
282
+ """Definition of sine generator
283
+ SineGen(samp_rate, harmonic_num = 0,
284
+ sine_amp = 0.1, noise_std = 0.003,
285
+ voiced_threshold = 0,
286
+ flag_for_pulse=False)
287
+ samp_rate: sampling rate in Hz
288
+ harmonic_num: number of harmonic overtones (default 0)
289
+ sine_amp: amplitude of sine-wavefrom (default 0.1)
290
+ noise_std: std of Gaussian noise (default 0.003)
291
+ voiced_thoreshold: F0 threshold for U/V classification (default 0)
292
+ flag_for_pulse: this SinGen is used inside PulseGen (default False)
293
+ Note: when flag_for_pulse is True, the first time step of a voiced
294
+ segment is always sin(np.pi) or cos(0)
295
+ """
296
+
297
+ def __init__(
298
+ self,
299
+ samp_rate,
300
+ harmonic_num=0,
301
+ sine_amp=0.1,
302
+ noise_std=0.003,
303
+ voiced_threshold=0,
304
+ flag_for_pulse=False,
305
+ ):
306
+ super(SineGen, self).__init__()
307
+ self.sine_amp = sine_amp
308
+ self.noise_std = noise_std
309
+ self.harmonic_num = harmonic_num
310
+ self.dim = self.harmonic_num + 1
311
+ self.sampling_rate = samp_rate
312
+ self.voiced_threshold = voiced_threshold
313
+
314
+ def _f02uv(self, f0):
315
+ # generate uv signal
316
+ uv = torch.ones_like(f0)
317
+ uv = uv * (f0 > self.voiced_threshold)
318
+ return uv.float()
319
+
320
+ def forward(self, f0, upp):
321
+ """sine_tensor, uv = forward(f0)
322
+ input F0: tensor(batchsize=1, length, dim=1)
323
+ f0 for unvoiced steps should be 0
324
+ output sine_tensor: tensor(batchsize=1, length, dim)
325
+ output uv: tensor(batchsize=1, length, 1)
326
+ """
327
+ with torch.no_grad():
328
+ f0 = f0[:, None].transpose(1, 2)
329
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
330
+ # fundamental component
331
+ f0_buf[:, :, 0] = f0[:, :, 0]
332
+ for idx in np.arange(self.harmonic_num):
333
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
334
+ idx + 2
335
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
336
+ rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化
337
+ rand_ini = torch.rand(
338
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
339
+ )
340
+ rand_ini[:, 0] = 0
341
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
342
+ tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化
343
+ tmp_over_one *= upp
344
+ tmp_over_one = F.interpolate(
345
+ tmp_over_one.transpose(2, 1),
346
+ scale_factor=upp,
347
+ mode="linear",
348
+ align_corners=True,
349
+ ).transpose(2, 1)
350
+ rad_values = F.interpolate(
351
+ rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
352
+ ).transpose(
353
+ 2, 1
354
+ ) #######
355
+ tmp_over_one %= 1
356
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
357
+ cumsum_shift = torch.zeros_like(rad_values)
358
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
359
+ sine_waves = torch.sin(
360
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
361
+ )
362
+ sine_waves = sine_waves * self.sine_amp
363
+ uv = self._f02uv(f0)
364
+ uv = F.interpolate(
365
+ uv.transpose(2, 1), scale_factor=upp, mode="nearest"
366
+ ).transpose(2, 1)
367
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
368
+ noise = noise_amp * torch.randn_like(sine_waves)
369
+ sine_waves = sine_waves * uv + noise
370
+ return sine_waves, uv, noise
371
+
372
+
373
+ class SourceModuleHnNSF(torch.nn.Module):
374
+ """SourceModule for hn-nsf
375
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
376
+ add_noise_std=0.003, voiced_threshod=0)
377
+ sampling_rate: sampling_rate in Hz
378
+ harmonic_num: number of harmonic above F0 (default: 0)
379
+ sine_amp: amplitude of sine source signal (default: 0.1)
380
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
381
+ note that amplitude of noise in unvoiced is decided
382
+ by sine_amp
383
+ voiced_threshold: threhold to set U/V given F0 (default: 0)
384
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
385
+ F0_sampled (batchsize, length, 1)
386
+ Sine_source (batchsize, length, 1)
387
+ noise_source (batchsize, length 1)
388
+ uv (batchsize, length, 1)
389
+ """
390
+
391
+ def __init__(
392
+ self,
393
+ sampling_rate,
394
+ harmonic_num=0,
395
+ sine_amp=0.1,
396
+ add_noise_std=0.003,
397
+ voiced_threshod=0,
398
+ is_half=True,
399
+ ):
400
+ super(SourceModuleHnNSF, self).__init__()
401
+
402
+ self.sine_amp = sine_amp
403
+ self.noise_std = add_noise_std
404
+ self.is_half = is_half
405
+ # to produce sine waveforms
406
+ self.l_sin_gen = SineGen(
407
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
408
+ )
409
+
410
+ # to merge source harmonics into a single excitation
411
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
412
+ self.l_tanh = torch.nn.Tanh()
413
+
414
+ def forward(self, x, upp=None):
415
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
416
+ if self.is_half:
417
+ sine_wavs = sine_wavs.half()
418
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
419
+ return sine_merge, None, None # noise, uv
420
+
421
+
422
+ class GeneratorNSF(torch.nn.Module):
423
+ def __init__(
424
+ self,
425
+ initial_channel,
426
+ resblock,
427
+ resblock_kernel_sizes,
428
+ resblock_dilation_sizes,
429
+ upsample_rates,
430
+ upsample_initial_channel,
431
+ upsample_kernel_sizes,
432
+ gin_channels,
433
+ sr,
434
+ is_half=False,
435
+ ):
436
+ super(GeneratorNSF, self).__init__()
437
+ self.num_kernels = len(resblock_kernel_sizes)
438
+ self.num_upsamples = len(upsample_rates)
439
+
440
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
441
+ self.m_source = SourceModuleHnNSF(
442
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
443
+ )
444
+ self.noise_convs = nn.ModuleList()
445
+ self.conv_pre = Conv1d(
446
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
447
+ )
448
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
449
+
450
+ self.ups = nn.ModuleList()
451
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
452
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
453
+ self.ups.append(
454
+ weight_norm(
455
+ ConvTranspose1d(
456
+ upsample_initial_channel // (2**i),
457
+ upsample_initial_channel // (2 ** (i + 1)),
458
+ k,
459
+ u,
460
+ padding=(k - u) // 2,
461
+ )
462
+ )
463
+ )
464
+ if i + 1 < len(upsample_rates):
465
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
466
+ self.noise_convs.append(
467
+ Conv1d(
468
+ 1,
469
+ c_cur,
470
+ kernel_size=stride_f0 * 2,
471
+ stride=stride_f0,
472
+ padding=stride_f0 // 2,
473
+ )
474
+ )
475
+ else:
476
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
477
+
478
+ self.resblocks = nn.ModuleList()
479
+ for i in range(len(self.ups)):
480
+ ch = upsample_initial_channel // (2 ** (i + 1))
481
+ for j, (k, d) in enumerate(
482
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
483
+ ):
484
+ self.resblocks.append(resblock(ch, k, d))
485
+
486
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
487
+ self.ups.apply(init_weights)
488
+
489
+ if gin_channels != 0:
490
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
491
+
492
+ self.upp = np.prod(upsample_rates)
493
+
494
+ def forward(self, x, f0, g=None):
495
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
496
+ har_source = har_source.transpose(1, 2)
497
+ x = self.conv_pre(x)
498
+ if g is not None:
499
+ x = x + self.cond(g)
500
+
501
+ for i in range(self.num_upsamples):
502
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
503
+ x = self.ups[i](x)
504
+ x_source = self.noise_convs[i](har_source)
505
+ x = x + x_source
506
+ xs = None
507
+ for j in range(self.num_kernels):
508
+ if xs is None:
509
+ xs = self.resblocks[i * self.num_kernels + j](x)
510
+ else:
511
+ xs += self.resblocks[i * self.num_kernels + j](x)
512
+ x = xs / self.num_kernels
513
+ x = F.leaky_relu(x)
514
+ x = self.conv_post(x)
515
+ x = torch.tanh(x)
516
+ return x
517
+
518
+ def remove_weight_norm(self):
519
+ for l in self.ups:
520
+ remove_weight_norm(l)
521
+ for l in self.resblocks:
522
+ l.remove_weight_norm()
523
+
524
+
525
+ sr2sr = {
526
+ "32k": 32000,
527
+ "40k": 40000,
528
+ "48k": 48000,
529
+ }
530
+
531
+
532
+ class SynthesizerTrnMs256NSFsid(nn.Module):
533
+ def __init__(
534
+ self,
535
+ spec_channels,
536
+ segment_size,
537
+ inter_channels,
538
+ hidden_channels,
539
+ filter_channels,
540
+ n_heads,
541
+ n_layers,
542
+ kernel_size,
543
+ p_dropout,
544
+ resblock,
545
+ resblock_kernel_sizes,
546
+ resblock_dilation_sizes,
547
+ upsample_rates,
548
+ upsample_initial_channel,
549
+ upsample_kernel_sizes,
550
+ spk_embed_dim,
551
+ gin_channels,
552
+ sr,
553
+ **kwargs
554
+ ):
555
+ super().__init__()
556
+ if type(sr) == type("strr"):
557
+ sr = sr2sr[sr]
558
+ self.spec_channels = spec_channels
559
+ self.inter_channels = inter_channels
560
+ self.hidden_channels = hidden_channels
561
+ self.filter_channels = filter_channels
562
+ self.n_heads = n_heads
563
+ self.n_layers = n_layers
564
+ self.kernel_size = kernel_size
565
+ self.p_dropout = p_dropout
566
+ self.resblock = resblock
567
+ self.resblock_kernel_sizes = resblock_kernel_sizes
568
+ self.resblock_dilation_sizes = resblock_dilation_sizes
569
+ self.upsample_rates = upsample_rates
570
+ self.upsample_initial_channel = upsample_initial_channel
571
+ self.upsample_kernel_sizes = upsample_kernel_sizes
572
+ self.segment_size = segment_size
573
+ self.gin_channels = gin_channels
574
+ # self.hop_length = hop_length#
575
+ self.spk_embed_dim = spk_embed_dim
576
+ self.enc_p = TextEncoder256(
577
+ inter_channels,
578
+ hidden_channels,
579
+ filter_channels,
580
+ n_heads,
581
+ n_layers,
582
+ kernel_size,
583
+ p_dropout,
584
+ )
585
+ self.dec = GeneratorNSF(
586
+ inter_channels,
587
+ resblock,
588
+ resblock_kernel_sizes,
589
+ resblock_dilation_sizes,
590
+ upsample_rates,
591
+ upsample_initial_channel,
592
+ upsample_kernel_sizes,
593
+ gin_channels=gin_channels,
594
+ sr=sr,
595
+ is_half=kwargs["is_half"],
596
+ )
597
+ self.enc_q = PosteriorEncoder(
598
+ spec_channels,
599
+ inter_channels,
600
+ hidden_channels,
601
+ 5,
602
+ 1,
603
+ 16,
604
+ gin_channels=gin_channels,
605
+ )
606
+ self.flow = ResidualCouplingBlock(
607
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
608
+ )
609
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
610
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
611
+
612
+ def remove_weight_norm(self):
613
+ self.dec.remove_weight_norm()
614
+ self.flow.remove_weight_norm()
615
+ self.enc_q.remove_weight_norm()
616
+
617
+ def forward(
618
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
619
+ ): # 这里ds是id,[bs,1]
620
+ # print(1,pitch.shape)#[bs,t]
621
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
622
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
623
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
624
+ z_p = self.flow(z, y_mask, g=g)
625
+ z_slice, ids_slice = commons.rand_slice_segments(
626
+ z, y_lengths, self.segment_size
627
+ )
628
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
629
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
630
+ # print(-2,pitchf.shape,z_slice.shape)
631
+ o = self.dec(z_slice, pitchf, g=g)
632
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
633
+
634
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
635
+ g = self.emb_g(sid).unsqueeze(-1)
636
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
637
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
638
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
639
+ o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
640
+ return o, x_mask, (z, z_p, m_p, logs_p)
641
+
642
+
643
+ class SynthesizerTrnMs768NSFsid(nn.Module):
644
+ def __init__(
645
+ self,
646
+ spec_channels,
647
+ segment_size,
648
+ inter_channels,
649
+ hidden_channels,
650
+ filter_channels,
651
+ n_heads,
652
+ n_layers,
653
+ kernel_size,
654
+ p_dropout,
655
+ resblock,
656
+ resblock_kernel_sizes,
657
+ resblock_dilation_sizes,
658
+ upsample_rates,
659
+ upsample_initial_channel,
660
+ upsample_kernel_sizes,
661
+ spk_embed_dim,
662
+ gin_channels,
663
+ sr,
664
+ **kwargs
665
+ ):
666
+ super().__init__()
667
+ if type(sr) == type("strr"):
668
+ sr = sr2sr[sr]
669
+ self.spec_channels = spec_channels
670
+ self.inter_channels = inter_channels
671
+ self.hidden_channels = hidden_channels
672
+ self.filter_channels = filter_channels
673
+ self.n_heads = n_heads
674
+ self.n_layers = n_layers
675
+ self.kernel_size = kernel_size
676
+ self.p_dropout = p_dropout
677
+ self.resblock = resblock
678
+ self.resblock_kernel_sizes = resblock_kernel_sizes
679
+ self.resblock_dilation_sizes = resblock_dilation_sizes
680
+ self.upsample_rates = upsample_rates
681
+ self.upsample_initial_channel = upsample_initial_channel
682
+ self.upsample_kernel_sizes = upsample_kernel_sizes
683
+ self.segment_size = segment_size
684
+ self.gin_channels = gin_channels
685
+ # self.hop_length = hop_length#
686
+ self.spk_embed_dim = spk_embed_dim
687
+ self.enc_p = TextEncoder768(
688
+ inter_channels,
689
+ hidden_channels,
690
+ filter_channels,
691
+ n_heads,
692
+ n_layers,
693
+ kernel_size,
694
+ p_dropout,
695
+ )
696
+ self.dec = GeneratorNSF(
697
+ inter_channels,
698
+ resblock,
699
+ resblock_kernel_sizes,
700
+ resblock_dilation_sizes,
701
+ upsample_rates,
702
+ upsample_initial_channel,
703
+ upsample_kernel_sizes,
704
+ gin_channels=gin_channels,
705
+ sr=sr,
706
+ is_half=kwargs["is_half"],
707
+ )
708
+ self.enc_q = PosteriorEncoder(
709
+ spec_channels,
710
+ inter_channels,
711
+ hidden_channels,
712
+ 5,
713
+ 1,
714
+ 16,
715
+ gin_channels=gin_channels,
716
+ )
717
+ self.flow = ResidualCouplingBlock(
718
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
719
+ )
720
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
721
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
722
+
723
+ def remove_weight_norm(self):
724
+ self.dec.remove_weight_norm()
725
+ self.flow.remove_weight_norm()
726
+ self.enc_q.remove_weight_norm()
727
+
728
+ def forward(
729
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
730
+ ): # 这里ds是id,[bs,1]
731
+ # print(1,pitch.shape)#[bs,t]
732
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
733
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
734
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
735
+ z_p = self.flow(z, y_mask, g=g)
736
+ z_slice, ids_slice = commons.rand_slice_segments(
737
+ z, y_lengths, self.segment_size
738
+ )
739
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
740
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
741
+ # print(-2,pitchf.shape,z_slice.shape)
742
+ o = self.dec(z_slice, pitchf, g=g)
743
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
744
+
745
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
746
+ g = self.emb_g(sid).unsqueeze(-1)
747
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
748
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
749
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
750
+ o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
751
+ return o, x_mask, (z, z_p, m_p, logs_p)
752
+
753
+
754
+ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
755
+ def __init__(
756
+ self,
757
+ spec_channels,
758
+ segment_size,
759
+ inter_channels,
760
+ hidden_channels,
761
+ filter_channels,
762
+ n_heads,
763
+ n_layers,
764
+ kernel_size,
765
+ p_dropout,
766
+ resblock,
767
+ resblock_kernel_sizes,
768
+ resblock_dilation_sizes,
769
+ upsample_rates,
770
+ upsample_initial_channel,
771
+ upsample_kernel_sizes,
772
+ spk_embed_dim,
773
+ gin_channels,
774
+ sr=None,
775
+ **kwargs
776
+ ):
777
+ super().__init__()
778
+ self.spec_channels = spec_channels
779
+ self.inter_channels = inter_channels
780
+ self.hidden_channels = hidden_channels
781
+ self.filter_channels = filter_channels
782
+ self.n_heads = n_heads
783
+ self.n_layers = n_layers
784
+ self.kernel_size = kernel_size
785
+ self.p_dropout = p_dropout
786
+ self.resblock = resblock
787
+ self.resblock_kernel_sizes = resblock_kernel_sizes
788
+ self.resblock_dilation_sizes = resblock_dilation_sizes
789
+ self.upsample_rates = upsample_rates
790
+ self.upsample_initial_channel = upsample_initial_channel
791
+ self.upsample_kernel_sizes = upsample_kernel_sizes
792
+ self.segment_size = segment_size
793
+ self.gin_channels = gin_channels
794
+ # self.hop_length = hop_length#
795
+ self.spk_embed_dim = spk_embed_dim
796
+ self.enc_p = TextEncoder256(
797
+ inter_channels,
798
+ hidden_channels,
799
+ filter_channels,
800
+ n_heads,
801
+ n_layers,
802
+ kernel_size,
803
+ p_dropout,
804
+ f0=False,
805
+ )
806
+ self.dec = Generator(
807
+ inter_channels,
808
+ resblock,
809
+ resblock_kernel_sizes,
810
+ resblock_dilation_sizes,
811
+ upsample_rates,
812
+ upsample_initial_channel,
813
+ upsample_kernel_sizes,
814
+ gin_channels=gin_channels,
815
+ )
816
+ self.enc_q = PosteriorEncoder(
817
+ spec_channels,
818
+ inter_channels,
819
+ hidden_channels,
820
+ 5,
821
+ 1,
822
+ 16,
823
+ gin_channels=gin_channels,
824
+ )
825
+ self.flow = ResidualCouplingBlock(
826
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
827
+ )
828
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
829
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
830
+
831
+ def remove_weight_norm(self):
832
+ self.dec.remove_weight_norm()
833
+ self.flow.remove_weight_norm()
834
+ self.enc_q.remove_weight_norm()
835
+
836
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
837
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
838
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
839
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
840
+ z_p = self.flow(z, y_mask, g=g)
841
+ z_slice, ids_slice = commons.rand_slice_segments(
842
+ z, y_lengths, self.segment_size
843
+ )
844
+ o = self.dec(z_slice, g=g)
845
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
846
+
847
+ def infer(self, phone, phone_lengths, sid, max_len=None):
848
+ g = self.emb_g(sid).unsqueeze(-1)
849
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
850
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
851
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
852
+ o = self.dec((z * x_mask)[:, :, :max_len], g=g)
853
+ return o, x_mask, (z, z_p, m_p, logs_p)
854
+
855
+
856
+ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
857
+ def __init__(
858
+ self,
859
+ spec_channels,
860
+ segment_size,
861
+ inter_channels,
862
+ hidden_channels,
863
+ filter_channels,
864
+ n_heads,
865
+ n_layers,
866
+ kernel_size,
867
+ p_dropout,
868
+ resblock,
869
+ resblock_kernel_sizes,
870
+ resblock_dilation_sizes,
871
+ upsample_rates,
872
+ upsample_initial_channel,
873
+ upsample_kernel_sizes,
874
+ spk_embed_dim,
875
+ gin_channels,
876
+ sr=None,
877
+ **kwargs
878
+ ):
879
+ super().__init__()
880
+ self.spec_channels = spec_channels
881
+ self.inter_channels = inter_channels
882
+ self.hidden_channels = hidden_channels
883
+ self.filter_channels = filter_channels
884
+ self.n_heads = n_heads
885
+ self.n_layers = n_layers
886
+ self.kernel_size = kernel_size
887
+ self.p_dropout = p_dropout
888
+ self.resblock = resblock
889
+ self.resblock_kernel_sizes = resblock_kernel_sizes
890
+ self.resblock_dilation_sizes = resblock_dilation_sizes
891
+ self.upsample_rates = upsample_rates
892
+ self.upsample_initial_channel = upsample_initial_channel
893
+ self.upsample_kernel_sizes = upsample_kernel_sizes
894
+ self.segment_size = segment_size
895
+ self.gin_channels = gin_channels
896
+ # self.hop_length = hop_length#
897
+ self.spk_embed_dim = spk_embed_dim
898
+ self.enc_p = TextEncoder768(
899
+ inter_channels,
900
+ hidden_channels,
901
+ filter_channels,
902
+ n_heads,
903
+ n_layers,
904
+ kernel_size,
905
+ p_dropout,
906
+ f0=False,
907
+ )
908
+ self.dec = Generator(
909
+ inter_channels,
910
+ resblock,
911
+ resblock_kernel_sizes,
912
+ resblock_dilation_sizes,
913
+ upsample_rates,
914
+ upsample_initial_channel,
915
+ upsample_kernel_sizes,
916
+ gin_channels=gin_channels,
917
+ )
918
+ self.enc_q = PosteriorEncoder(
919
+ spec_channels,
920
+ inter_channels,
921
+ hidden_channels,
922
+ 5,
923
+ 1,
924
+ 16,
925
+ gin_channels=gin_channels,
926
+ )
927
+ self.flow = ResidualCouplingBlock(
928
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
929
+ )
930
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
931
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
932
+
933
+ def remove_weight_norm(self):
934
+ self.dec.remove_weight_norm()
935
+ self.flow.remove_weight_norm()
936
+ self.enc_q.remove_weight_norm()
937
+
938
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
939
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
940
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
941
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
942
+ z_p = self.flow(z, y_mask, g=g)
943
+ z_slice, ids_slice = commons.rand_slice_segments(
944
+ z, y_lengths, self.segment_size
945
+ )
946
+ o = self.dec(z_slice, g=g)
947
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
948
+
949
+ def infer(self, phone, phone_lengths, sid, max_len=None):
950
+ g = self.emb_g(sid).unsqueeze(-1)
951
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
952
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
953
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
954
+ o = self.dec((z * x_mask)[:, :, :max_len], g=g)
955
+ return o, x_mask, (z, z_p, m_p, logs_p)
956
+
957
+
958
+ class MultiPeriodDiscriminator(torch.nn.Module):
959
+ def __init__(self, use_spectral_norm=False):
960
+ super(MultiPeriodDiscriminator, self).__init__()
961
+ periods = [2, 3, 5, 7, 11, 17]
962
+ # periods = [3, 5, 7, 11, 17, 23, 37]
963
+
964
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
965
+ discs = discs + [
966
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
967
+ ]
968
+ self.discriminators = nn.ModuleList(discs)
969
+
970
+ def forward(self, y, y_hat):
971
+ y_d_rs = [] #
972
+ y_d_gs = []
973
+ fmap_rs = []
974
+ fmap_gs = []
975
+ for i, d in enumerate(self.discriminators):
976
+ y_d_r, fmap_r = d(y)
977
+ y_d_g, fmap_g = d(y_hat)
978
+ # for j in range(len(fmap_r)):
979
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
980
+ y_d_rs.append(y_d_r)
981
+ y_d_gs.append(y_d_g)
982
+ fmap_rs.append(fmap_r)
983
+ fmap_gs.append(fmap_g)
984
+
985
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
986
+
987
+
988
+ class MultiPeriodDiscriminatorV2(torch.nn.Module):
989
+ def __init__(self, use_spectral_norm=False):
990
+ super(MultiPeriodDiscriminatorV2, self).__init__()
991
+ # periods = [2, 3, 5, 7, 11, 17]
992
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
993
+
994
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
995
+ discs = discs + [
996
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
997
+ ]
998
+ self.discriminators = nn.ModuleList(discs)
999
+
1000
+ def forward(self, y, y_hat):
1001
+ y_d_rs = [] #
1002
+ y_d_gs = []
1003
+ fmap_rs = []
1004
+ fmap_gs = []
1005
+ for i, d in enumerate(self.discriminators):
1006
+ y_d_r, fmap_r = d(y)
1007
+ y_d_g, fmap_g = d(y_hat)
1008
+ # for j in range(len(fmap_r)):
1009
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
1010
+ y_d_rs.append(y_d_r)
1011
+ y_d_gs.append(y_d_g)
1012
+ fmap_rs.append(fmap_r)
1013
+ fmap_gs.append(fmap_g)
1014
+
1015
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1016
+
1017
+
1018
+ class DiscriminatorS(torch.nn.Module):
1019
+ def __init__(self, use_spectral_norm=False):
1020
+ super(DiscriminatorS, self).__init__()
1021
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
1022
+ self.convs = nn.ModuleList(
1023
+ [
1024
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
1025
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
1026
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
1027
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
1028
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
1029
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
1030
+ ]
1031
+ )
1032
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
1033
+
1034
+ def forward(self, x):
1035
+ fmap = []
1036
+
1037
+ for l in self.convs:
1038
+ x = l(x)
1039
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
1040
+ fmap.append(x)
1041
+ x = self.conv_post(x)
1042
+ fmap.append(x)
1043
+ x = torch.flatten(x, 1, -1)
1044
+
1045
+ return x, fmap
1046
+
1047
+
1048
+ class DiscriminatorP(torch.nn.Module):
1049
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
1050
+ super(DiscriminatorP, self).__init__()
1051
+ self.period = period
1052
+ self.use_spectral_norm = use_spectral_norm
1053
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
1054
+ self.convs = nn.ModuleList(
1055
+ [
1056
+ norm_f(
1057
+ Conv2d(
1058
+ 1,
1059
+ 32,
1060
+ (kernel_size, 1),
1061
+ (stride, 1),
1062
+ padding=(get_padding(kernel_size, 1), 0),
1063
+ )
1064
+ ),
1065
+ norm_f(
1066
+ Conv2d(
1067
+ 32,
1068
+ 128,
1069
+ (kernel_size, 1),
1070
+ (stride, 1),
1071
+ padding=(get_padding(kernel_size, 1), 0),
1072
+ )
1073
+ ),
1074
+ norm_f(
1075
+ Conv2d(
1076
+ 128,
1077
+ 512,
1078
+ (kernel_size, 1),
1079
+ (stride, 1),
1080
+ padding=(get_padding(kernel_size, 1), 0),
1081
+ )
1082
+ ),
1083
+ norm_f(
1084
+ Conv2d(
1085
+ 512,
1086
+ 1024,
1087
+ (kernel_size, 1),
1088
+ (stride, 1),
1089
+ padding=(get_padding(kernel_size, 1), 0),
1090
+ )
1091
+ ),
1092
+ norm_f(
1093
+ Conv2d(
1094
+ 1024,
1095
+ 1024,
1096
+ (kernel_size, 1),
1097
+ 1,
1098
+ padding=(get_padding(kernel_size, 1), 0),
1099
+ )
1100
+ ),
1101
+ ]
1102
+ )
1103
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
1104
+
1105
+ def forward(self, x):
1106
+ fmap = []
1107
+
1108
+ # 1d to 2d
1109
+ b, c, t = x.shape
1110
+ if t % self.period != 0: # pad first
1111
+ n_pad = self.period - (t % self.period)
1112
+ x = F.pad(x, (0, n_pad), "reflect")
1113
+ t = t + n_pad
1114
+ x = x.view(b, c, t // self.period, self.period)
1115
+
1116
+ for l in self.convs:
1117
+ x = l(x)
1118
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
1119
+ fmap.append(x)
1120
+ x = self.conv_post(x)
1121
+ fmap.append(x)
1122
+ x = torch.flatten(x, 1, -1)
1123
+
1124
+ return x, fmap
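
models_dml.py above mirrors models.py for DirectML use (note that its SineGen._f02uv casts the U/V mask to float explicitly). The core of SineGen.forward is a phase accumulator: the per-sample frequency in cycles (f0 / sampling_rate) is cumulatively summed and passed through sin, with harmonics generated as integer multiples of the fundamental. A minimal standalone sketch of that idea under assumed toy values (the real module also upsamples the increments by upp, applies a cumsum phase-shift correction at frame boundaries, and replaces unvoiced frames with noise):

    import torch
    import numpy as np

    sr = 16000
    f0 = torch.full((1, 100, 1), 220.0)         # assumed constant 220 Hz fundamental, (batch, frames, 1)
    harmonics = f0 * torch.arange(1, 4)         # fundamental plus two overtones, (1, 100, 3)
    rad_values = (harmonics / sr) % 1           # per-step phase increment in cycles
    phase = torch.cumsum(rad_values, dim=1)     # accumulated phase in cycles
    sine = 0.1 * torch.sin(2 * np.pi * phase)   # sine_amp = 0.1, matching the module default

SourceModuleHnNSF then merges the harmonic channels into a single excitation with a Linear layer followed by Tanh, which GeneratorNSF mixes in through its noise_convs at each upsampling stage.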
BanG-Dream-MyGO/lib/infer_pack/models_onnx.py ADDED
@@ -0,0 +1,819 @@
+import math, pdb, os
+from time import time as ttime
+import torch
+from torch import nn
+from torch.nn import functional as F
+from lib.infer_pack import modules
+from lib.infer_pack import attentions
+from lib.infer_pack import commons
+from lib.infer_pack.commons import init_weights, get_padding
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from lib.infer_pack.commons import init_weights
+import numpy as np
+from lib.infer_pack import commons
+
+
+class TextEncoder256(nn.Module):
+    def __init__(
+        self,
+        out_channels,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size,
+        p_dropout,
+        f0=True,
+    ):
+        super().__init__()
+        self.out_channels = out_channels
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.emb_phone = nn.Linear(256, hidden_channels)
+        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+        if f0:
+            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
+        self.encoder = attentions.Encoder(
+            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+        )
+        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+    def forward(self, phone, pitch, lengths):
+        if pitch is None:
+            x = self.emb_phone(phone)
+        else:
+            x = self.emb_phone(phone) + self.emb_pitch(pitch)
+        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
+        x = self.lrelu(x)
+        x = torch.transpose(x, 1, -1)  # [b, h, t]
+        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+            x.dtype
+        )
+        x = self.encoder(x * x_mask, x_mask)
+        stats = self.proj(x) * x_mask
+
+        m, logs = torch.split(stats, self.out_channels, dim=1)
+        return m, logs, x_mask
+
+
+class TextEncoder768(nn.Module):
+    def __init__(
+        self,
+        out_channels,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size,
+        p_dropout,
+        f0=True,
+    ):
+        super().__init__()
+        self.out_channels = out_channels
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.emb_phone = nn.Linear(768, hidden_channels)
+        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+        if f0:
+            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
+        self.encoder = attentions.Encoder(
+            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+        )
+        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+    def forward(self, phone, pitch, lengths):
+        if pitch is None:
+            x = self.emb_phone(phone)
+        else:
+            x = self.emb_phone(phone) + self.emb_pitch(pitch)
+        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
+        x = self.lrelu(x)
+        x = torch.transpose(x, 1, -1)  # [b, h, t]
+        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+            x.dtype
+        )
+        x = self.encoder(x * x_mask, x_mask)
+        stats = self.proj(x) * x_mask
+
+        m, logs = torch.split(stats, self.out_channels, dim=1)
+        return m, logs, x_mask
+
+
+class ResidualCouplingBlock(nn.Module):
+    def __init__(
+        self,
+        channels,
+        hidden_channels,
+        kernel_size,
+        dilation_rate,
+        n_layers,
+        n_flows=4,
+        gin_channels=0,
+    ):
+        super().__init__()
+        self.channels = channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.n_flows = n_flows
+        self.gin_channels = gin_channels
+
+        self.flows = nn.ModuleList()
+        for i in range(n_flows):
+            self.flows.append(
+                modules.ResidualCouplingLayer(
+                    channels,
+                    hidden_channels,
+                    kernel_size,
+                    dilation_rate,
+                    n_layers,
+                    gin_channels=gin_channels,
+                    mean_only=True,
+                )
+            )
+            self.flows.append(modules.Flip())
+
+    def forward(self, x, x_mask, g=None, reverse=False):
+        if not reverse:
+            for flow in self.flows:
+                x, _ = flow(x, x_mask, g=g, reverse=reverse)
+        else:
+            for flow in reversed(self.flows):
+                x = flow(x, x_mask, g=g, reverse=reverse)
+        return x
+
+    def remove_weight_norm(self):
+        for i in range(self.n_flows):
+            self.flows[i * 2].remove_weight_norm()
+
+
+class PosteriorEncoder(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        hidden_channels,
+        kernel_size,
+        dilation_rate,
+        n_layers,
+        gin_channels=0,
+    ):
+        super().__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.gin_channels = gin_channels
+
+        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+        self.enc = modules.WN(
+            hidden_channels,
+            kernel_size,
+            dilation_rate,
+            n_layers,
+            gin_channels=gin_channels,
+        )
+        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+    def forward(self, x, x_lengths, g=None):
+        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
+            x.dtype
+        )
+        x = self.pre(x) * x_mask
+        x = self.enc(x, x_mask, g=g)
+        stats = self.proj(x) * x_mask
+        m, logs = torch.split(stats, self.out_channels, dim=1)
+        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+        return z, m, logs, x_mask
+
+    def remove_weight_norm(self):
+        self.enc.remove_weight_norm()
+
+
+class Generator(torch.nn.Module):
+    def __init__(
+        self,
+        initial_channel,
+        resblock,
+        resblock_kernel_sizes,
+        resblock_dilation_sizes,
+        upsample_rates,
+        upsample_initial_channel,
+        upsample_kernel_sizes,
+        gin_channels=0,
+    ):
+        super(Generator, self).__init__()
+        self.num_kernels = len(resblock_kernel_sizes)
+        self.num_upsamples = len(upsample_rates)
+        self.conv_pre = Conv1d(
+            initial_channel, upsample_initial_channel, 7, 1, padding=3
+        )
+        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+        self.ups = nn.ModuleList()
+        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+            self.ups.append(
+                weight_norm(
+                    ConvTranspose1d(
+                        upsample_initial_channel // (2**i),
+                        upsample_initial_channel // (2 ** (i + 1)),
+                        k,
+                        u,
+                        padding=(k - u) // 2,
+                    )
+                )
+            )
+
+        self.resblocks = nn.ModuleList()
+        for i in range(len(self.ups)):
+            ch = upsample_initial_channel // (2 ** (i + 1))
+            for j, (k, d) in enumerate(
+                zip(resblock_kernel_sizes, resblock_dilation_sizes)
+            ):
+                self.resblocks.append(resblock(ch, k, d))
+
+        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+        self.ups.apply(init_weights)
+
+        if gin_channels != 0:
+            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+    def forward(self, x, g=None):
+        x = self.conv_pre(x)
+        if g is not None:
+            x = x + self.cond(g)
+
+        for i in range(self.num_upsamples):
+            x = F.leaky_relu(x, modules.LRELU_SLOPE)
+            x = self.ups[i](x)
+            xs = None
+            for j in range(self.num_kernels):
+                if xs is None:
+                    xs = self.resblocks[i * self.num_kernels + j](x)
+                else:
+                    xs += self.resblocks[i * self.num_kernels + j](x)
+            x = xs / self.num_kernels
+        x = F.leaky_relu(x)
+        x = self.conv_post(x)
+        x = torch.tanh(x)
+
+        return x
+
+    def remove_weight_norm(self):
+        for l in self.ups:
+            remove_weight_norm(l)
+        for l in self.resblocks:
+            l.remove_weight_norm()
+
+
+class SineGen(torch.nn.Module):
+    """Definition of sine generator
+    SineGen(samp_rate, harmonic_num = 0,
+            sine_amp = 0.1, noise_std = 0.003,
+            voiced_threshold = 0,
+            flag_for_pulse=False)
+    samp_rate: sampling rate in Hz
+    harmonic_num: number of harmonic overtones (default 0)
+    sine_amp: amplitude of sine-waveform (default 0.1)
+    noise_std: std of Gaussian noise (default 0.003)
+    voiced_threshold: F0 threshold for U/V classification (default 0)
+    flag_for_pulse: this SineGen is used inside PulseGen (default False)
+    Note: when flag_for_pulse is True, the first time step of a voiced
+    segment is always sin(np.pi) or cos(0)
+    """
+
+    def __init__(
+        self,
+        samp_rate,
+        harmonic_num=0,
+        sine_amp=0.1,
+        noise_std=0.003,
+        voiced_threshold=0,
+        flag_for_pulse=False,
+    ):
+        super(SineGen, self).__init__()
+        self.sine_amp = sine_amp
+        self.noise_std = noise_std
+        self.harmonic_num = harmonic_num
+        self.dim = self.harmonic_num + 1
+        self.sampling_rate = samp_rate
+        self.voiced_threshold = voiced_threshold
+
+    def _f02uv(self, f0):
+        # generate uv signal
+        uv = torch.ones_like(f0)
+        uv = uv * (f0 > self.voiced_threshold)
+        return uv
+
+    def forward(self, f0, upp):
+        """sine_tensor, uv = forward(f0)
+        input F0: tensor(batchsize=1, length, dim=1)
+                  f0 for unvoiced steps should be 0
+        output sine_tensor: tensor(batchsize=1, length, dim)
+        output uv: tensor(batchsize=1, length, 1)
+        """
+        with torch.no_grad():
+            f0 = f0[:, None].transpose(1, 2)
+            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
+            # fundamental component
+            f0_buf[:, :, 0] = f0[:, :, 0]
+            for idx in np.arange(self.harmonic_num):
+                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
+                    idx + 2
+                )  # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
+            rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the n_har products cannot be optimized afterwards
+            rand_ini = torch.rand(
+                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
+            )
+            rand_ini[:, 0] = 0
+            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # a % 1 here would mean the later cumsum can no longer be optimized
+            tmp_over_one *= upp
+            tmp_over_one = F.interpolate(
+                tmp_over_one.transpose(2, 1),
+                scale_factor=upp,
+                mode="linear",
+                align_corners=True,
+            ).transpose(2, 1)
+            rad_values = F.interpolate(
+                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
+            ).transpose(
+                2, 1
+            )
+            tmp_over_one %= 1
+            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
+            cumsum_shift = torch.zeros_like(rad_values)
+            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+            sine_waves = torch.sin(
+                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
+            )
+            sine_waves = sine_waves * self.sine_amp
+            uv = self._f02uv(f0)
+            uv = F.interpolate(
+                uv.transpose(2, 1), scale_factor=upp, mode="nearest"
+            ).transpose(2, 1)
+            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+            noise = noise_amp * torch.randn_like(sine_waves)
+            sine_waves = sine_waves * uv + noise
+        return sine_waves, uv, noise
+
+
+class SourceModuleHnNSF(torch.nn.Module):
+    """SourceModule for hn-nsf
+    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
+                 add_noise_std=0.003, voiced_threshod=0)
+    sampling_rate: sampling_rate in Hz
+    harmonic_num: number of harmonic above F0 (default: 0)
+    sine_amp: amplitude of sine source signal (default: 0.1)
+    add_noise_std: std of additive Gaussian noise (default: 0.003)
+        note that amplitude of noise in unvoiced is decided
+        by sine_amp
+    voiced_threshold: threshold to set U/V given F0 (default: 0)
+    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
+    F0_sampled (batchsize, length, 1)
+    Sine_source (batchsize, length, 1)
+    noise_source (batchsize, length, 1)
+    uv (batchsize, length, 1)
+    """
+
+    def __init__(
+        self,
+        sampling_rate,
+        harmonic_num=0,
+        sine_amp=0.1,
+        add_noise_std=0.003,
+        voiced_threshod=0,
+        is_half=True,
+    ):
+        super(SourceModuleHnNSF, self).__init__()
+
+        self.sine_amp = sine_amp
+        self.noise_std = add_noise_std
+        self.is_half = is_half
+        # to produce sine waveforms
+        self.l_sin_gen = SineGen(
+            sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
+        )
+
+        # to merge source harmonics into a single excitation
+        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+        self.l_tanh = torch.nn.Tanh()
+
+    def forward(self, x, upp=None):
+        sine_wavs, uv, _ = self.l_sin_gen(x, upp)
+        if self.is_half:
+            sine_wavs = sine_wavs.half()
+        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+        return sine_merge, None, None  # noise, uv
+
+
+class GeneratorNSF(torch.nn.Module):
+    def __init__(
+        self,
+        initial_channel,
+        resblock,
+        resblock_kernel_sizes,
+        resblock_dilation_sizes,
+        upsample_rates,
+        upsample_initial_channel,
+        upsample_kernel_sizes,
+        gin_channels,
+        sr,
+        is_half=False,
+    ):
+        super(GeneratorNSF, self).__init__()
+        self.num_kernels = len(resblock_kernel_sizes)
+        self.num_upsamples = len(upsample_rates)
+
+        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
+        self.m_source = SourceModuleHnNSF(
+            sampling_rate=sr, harmonic_num=0, is_half=is_half
+        )
+        self.noise_convs = nn.ModuleList()
+        self.conv_pre = Conv1d(
+            initial_channel, upsample_initial_channel, 7, 1, padding=3
+        )
+        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+        self.ups = nn.ModuleList()
+        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+            c_cur = upsample_initial_channel // (2 ** (i + 1))
+            self.ups.append(
+                weight_norm(
+                    ConvTranspose1d(
+                        upsample_initial_channel // (2**i),
+                        upsample_initial_channel // (2 ** (i + 1)),
+                        k,
+                        u,
+                        padding=(k - u) // 2,
+                    )
+                )
+            )
+            if i + 1 < len(upsample_rates):
+                stride_f0 = np.prod(upsample_rates[i + 1 :])
+                self.noise_convs.append(
+                    Conv1d(
+                        1,
+                        c_cur,
+                        kernel_size=stride_f0 * 2,
+                        stride=stride_f0,
+                        padding=stride_f0 // 2,
+                    )
+                )
+            else:
+                self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
+
+        self.resblocks = nn.ModuleList()
+        for i in range(len(self.ups)):
+            ch = upsample_initial_channel // (2 ** (i + 1))
+            for j, (k, d) in enumerate(
+                zip(resblock_kernel_sizes, resblock_dilation_sizes)
+            ):
+                self.resblocks.append(resblock(ch, k, d))
+
+        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+        self.ups.apply(init_weights)
+
+        if gin_channels != 0:
+            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+        self.upp = np.prod(upsample_rates)
+
+    def forward(self, x, f0, g=None):
+        har_source, noi_source, uv = self.m_source(f0, self.upp)
+        har_source = har_source.transpose(1, 2)
+        x = self.conv_pre(x)
+        if g is not None:
+            x = x + self.cond(g)
+
+        for i in range(self.num_upsamples):
+            x = F.leaky_relu(x, modules.LRELU_SLOPE)
+            x = self.ups[i](x)
+            x_source = self.noise_convs[i](har_source)
+            x = x + x_source
+            xs = None
+            for j in range(self.num_kernels):
+                if xs is None:
+                    xs = self.resblocks[i * self.num_kernels + j](x)
+                else:
+                    xs += self.resblocks[i * self.num_kernels + j](x)
+            x = xs / self.num_kernels
+        x = F.leaky_relu(x)
+        x = self.conv_post(x)
+        x = torch.tanh(x)
+        return x
+
+    def remove_weight_norm(self):
+        for l in self.ups:
+            remove_weight_norm(l)
+        for l in self.resblocks:
+            l.remove_weight_norm()
+
+
+sr2sr = {
+    "32k": 32000,
+    "40k": 40000,
+    "48k": 48000,
+}
+
+
+class SynthesizerTrnMsNSFsidM(nn.Module):
+    def __init__(
+        self,
+        spec_channels,
+        segment_size,
+        inter_channels,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size,
+        p_dropout,
+        resblock,
+        resblock_kernel_sizes,
+        resblock_dilation_sizes,
+        upsample_rates,
+        upsample_initial_channel,
+        upsample_kernel_sizes,
+        spk_embed_dim,
+        gin_channels,
+        sr,
+        version,
+        **kwargs
+    ):
+        super().__init__()
+        if isinstance(sr, str):
+            sr = sr2sr[sr]
+        self.spec_channels = spec_channels
+        self.inter_channels = inter_channels
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.resblock = resblock
+        self.resblock_kernel_sizes = resblock_kernel_sizes
+        self.resblock_dilation_sizes = resblock_dilation_sizes
+        self.upsample_rates = upsample_rates
+        self.upsample_initial_channel = upsample_initial_channel
+        self.upsample_kernel_sizes = upsample_kernel_sizes
+        self.segment_size = segment_size
+        self.gin_channels = gin_channels
+        # self.hop_length = hop_length#
+        self.spk_embed_dim = spk_embed_dim
+        if version == "v1":
+            self.enc_p = TextEncoder256(
+                inter_channels,
+                hidden_channels,
+                filter_channels,
+                n_heads,
+                n_layers,
+                kernel_size,
+                p_dropout,
+            )
+        else:
+            self.enc_p = TextEncoder768(
+                inter_channels,
+                hidden_channels,
+                filter_channels,
+                n_heads,
+                n_layers,
+                kernel_size,
+                p_dropout,
+            )
+        self.dec = GeneratorNSF(
+            inter_channels,
+            resblock,
+            resblock_kernel_sizes,
+            resblock_dilation_sizes,
+            upsample_rates,
+            upsample_initial_channel,
+            upsample_kernel_sizes,
+            gin_channels=gin_channels,
+            sr=sr,
+            is_half=kwargs["is_half"],
+        )
+        self.enc_q = PosteriorEncoder(
+            spec_channels,
+            inter_channels,
+            hidden_channels,
+            5,
+            1,
+            16,
+            gin_channels=gin_channels,
+        )
+        self.flow = ResidualCouplingBlock(
+            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+        )
+        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+        self.speaker_map = None
+        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+    def remove_weight_norm(self):
+        self.dec.remove_weight_norm()
+        self.flow.remove_weight_norm()
+        self.enc_q.remove_weight_norm()
+
+    def construct_spkmixmap(self, n_speaker):
+        self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
+        for i in range(n_speaker):
+            self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
+        self.speaker_map = self.speaker_map.unsqueeze(0)
+
+    def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
+        if self.speaker_map is not None:  # [N, S] * [S, B, 1, H]
+            g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1))  # [N, S, B, 1, 1]
+            g = g * self.speaker_map  # [N, S, B, 1, H]
+            g = torch.sum(g, dim=1)  # [N, 1, B, 1, H]
+            g = g.transpose(0, -1).transpose(0, -2).squeeze(0)  # [B, H, N]
+        else:
+            g = g.unsqueeze(0)
+            g = self.emb_g(g).transpose(1, 2)
+
+        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+        z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
+        z = self.flow(z_p, x_mask, g=g, reverse=True)
+        o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
+        return o
+
+
+class MultiPeriodDiscriminator(torch.nn.Module):
+    def __init__(self, use_spectral_norm=False):
+        super(MultiPeriodDiscriminator, self).__init__()
+        periods = [2, 3, 5, 7, 11, 17]
+        # periods = [3, 5, 7, 11, 17, 23, 37]
+
+        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+        discs = discs + [
+            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+        ]
+        self.discriminators = nn.ModuleList(discs)
+
+    def forward(self, y, y_hat):
+        y_d_rs = []
+        y_d_gs = []
+        fmap_rs = []
+        fmap_gs = []
+        for i, d in enumerate(self.discriminators):
+            y_d_r, fmap_r = d(y)
+            y_d_g, fmap_g = d(y_hat)
+            # for j in range(len(fmap_r)):
+            #     print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+            y_d_rs.append(y_d_r)
+            y_d_gs.append(y_d_g)
+            fmap_rs.append(fmap_r)
+            fmap_gs.append(fmap_g)
+
+        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class MultiPeriodDiscriminatorV2(torch.nn.Module):
+    def __init__(self, use_spectral_norm=False):
+        super(MultiPeriodDiscriminatorV2, self).__init__()
+        # periods = [2, 3, 5, 7, 11, 17]
+        periods = [2, 3, 5, 7, 11, 17, 23, 37]
+
+        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+        discs = discs + [
+            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+        ]
+        self.discriminators = nn.ModuleList(discs)
+
+    def forward(self, y, y_hat):
+        y_d_rs = []
+        y_d_gs = []
+        fmap_rs = []
+        fmap_gs = []
+        for i, d in enumerate(self.discriminators):
+            y_d_r, fmap_r = d(y)
+            y_d_g, fmap_g = d(y_hat)
+            # for j in range(len(fmap_r)):
+            #     print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+            y_d_rs.append(y_d_r)
+            y_d_gs.append(y_d_g)
+            fmap_rs.append(fmap_r)
+            fmap_gs.append(fmap_g)
+
+        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class DiscriminatorS(torch.nn.Module):
+    def __init__(self, use_spectral_norm=False):
+        super(DiscriminatorS, self).__init__()
+        norm_f = spectral_norm if use_spectral_norm else weight_norm
+        self.convs = nn.ModuleList(
+            [
+                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+            ]
+        )
+        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+    def forward(self, x):
+        fmap = []
+
+        for l in self.convs:
+            x = l(x)
+            x = F.leaky_relu(x, modules.LRELU_SLOPE)
+            fmap.append(x)
+        x = self.conv_post(x)
+        fmap.append(x)
+        x = torch.flatten(x, 1, -1)
+
+        return x, fmap
+
+
+class DiscriminatorP(torch.nn.Module):
+    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+        super(DiscriminatorP, self).__init__()
+        self.period = period
+        self.use_spectral_norm = use_spectral_norm
+        norm_f = spectral_norm if use_spectral_norm else weight_norm
+        self.convs = nn.ModuleList(
+            [
+                norm_f(
+                    Conv2d(
+                        1,
+                        32,
+                        (kernel_size, 1),
+                        (stride, 1),
+                        padding=(get_padding(kernel_size, 1), 0),
+                    )
+                ),
+                norm_f(
+                    Conv2d(
+                        32,
+                        128,
+                        (kernel_size, 1),
+                        (stride, 1),
+                        padding=(get_padding(kernel_size, 1), 0),
+                    )
+                ),
+                norm_f(
+                    Conv2d(
+                        128,
+                        512,
+                        (kernel_size, 1),
+                        (stride, 1),
+                        padding=(get_padding(kernel_size, 1), 0),
+                    )
+                ),
+                norm_f(
+                    Conv2d(
+                        512,
+                        1024,
+                        (kernel_size, 1),
+                        (stride, 1),
+                        padding=(get_padding(kernel_size, 1), 0),
+                    )
+                ),
+                norm_f(
+                    Conv2d(
789
+ 1024,
790
+ 1024,
791
+ (kernel_size, 1),
792
+ 1,
793
+ padding=(get_padding(kernel_size, 1), 0),
794
+ )
795
+ ),
796
+ ]
797
+ )
798
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
799
+
800
+ def forward(self, x):
801
+ fmap = []
802
+
803
+ # 1d to 2d
804
+ b, c, t = x.shape
805
+ if t % self.period != 0: # pad first
806
+ n_pad = self.period - (t % self.period)
807
+ x = F.pad(x, (0, n_pad), "reflect")
808
+ t = t + n_pad
809
+ x = x.view(b, c, t // self.period, self.period)
810
+
811
+ for l in self.convs:
812
+ x = l(x)
813
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
814
+ fmap.append(x)
815
+ x = self.conv_post(x)
816
+ fmap.append(x)
817
+ x = torch.flatten(x, 1, -1)
818
+
819
+ return x, fmap
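
A minimal smoke-test sketch for the discriminator stack above, assuming this file is importable as `lib.infer_pack.models`, torch is installed, and mono waveforms are shaped [batch, 1, samples]:

import torch
from lib.infer_pack.models import MultiPeriodDiscriminatorV2

disc = MultiPeriodDiscriminatorV2(use_spectral_norm=False)
y = torch.randn(2, 1, 8192)      # "real" waveform batch
y_hat = torch.randn(2, 1, 8192)  # "generated" waveform batch
y_d_rs, y_d_gs, fmap_rs, fmap_gs = disc(y, y_hat)
# one score per sub-discriminator: DiscriminatorS plus one DiscriminatorP per period
assert len(y_d_rs) == 1 + 8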
BanG-Dream-MyGO/lib/infer_pack/modules.py ADDED
@@ -0,0 +1,522 @@
1
+ import copy
2
+ import math
3
+ import numpy as np
4
+ import scipy
5
+ import torch
6
+ from torch import nn
7
+ from torch.nn import functional as F
8
+
9
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10
+ from torch.nn.utils import weight_norm, remove_weight_norm
11
+
12
+ from lib.infer_pack import commons
13
+ from lib.infer_pack.commons import init_weights, get_padding
14
+ from lib.infer_pack.transforms import piecewise_rational_quadratic_transform
15
+
16
+
17
+ LRELU_SLOPE = 0.1
18
+
19
+
20
+ class LayerNorm(nn.Module):
21
+ def __init__(self, channels, eps=1e-5):
22
+ super().__init__()
23
+ self.channels = channels
24
+ self.eps = eps
25
+
26
+ self.gamma = nn.Parameter(torch.ones(channels))
27
+ self.beta = nn.Parameter(torch.zeros(channels))
28
+
29
+ def forward(self, x):
30
+ x = x.transpose(1, -1)
31
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
32
+ return x.transpose(1, -1)
33
+
34
+
35
+ class ConvReluNorm(nn.Module):
36
+ def __init__(
37
+ self,
38
+ in_channels,
39
+ hidden_channels,
40
+ out_channels,
41
+ kernel_size,
42
+ n_layers,
43
+ p_dropout,
44
+ ):
45
+ super().__init__()
46
+ self.in_channels = in_channels
47
+ self.hidden_channels = hidden_channels
48
+ self.out_channels = out_channels
49
+ self.kernel_size = kernel_size
50
+ self.n_layers = n_layers
51
+ self.p_dropout = p_dropout
52
+ assert n_layers > 1, "Number of layers should be larger than 1."
53
+
54
+ self.conv_layers = nn.ModuleList()
55
+ self.norm_layers = nn.ModuleList()
56
+ self.conv_layers.append(
57
+ nn.Conv1d(
58
+ in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
59
+ )
60
+ )
61
+ self.norm_layers.append(LayerNorm(hidden_channels))
62
+ self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
63
+ for _ in range(n_layers - 1):
64
+ self.conv_layers.append(
65
+ nn.Conv1d(
66
+ hidden_channels,
67
+ hidden_channels,
68
+ kernel_size,
69
+ padding=kernel_size // 2,
70
+ )
71
+ )
72
+ self.norm_layers.append(LayerNorm(hidden_channels))
73
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
74
+ self.proj.weight.data.zero_()
75
+ self.proj.bias.data.zero_()
76
+
77
+ def forward(self, x, x_mask):
78
+ x_org = x
79
+ for i in range(self.n_layers):
80
+ x = self.conv_layers[i](x * x_mask)
81
+ x = self.norm_layers[i](x)
82
+ x = self.relu_drop(x)
83
+ x = x_org + self.proj(x)
84
+ return x * x_mask
85
+
86
+
87
+ class DDSConv(nn.Module):
88
+ """
89
+ Dilated and Depth-Separable Convolution
90
+ """
91
+
92
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
93
+ super().__init__()
94
+ self.channels = channels
95
+ self.kernel_size = kernel_size
96
+ self.n_layers = n_layers
97
+ self.p_dropout = p_dropout
98
+
99
+ self.drop = nn.Dropout(p_dropout)
100
+ self.convs_sep = nn.ModuleList()
101
+ self.convs_1x1 = nn.ModuleList()
102
+ self.norms_1 = nn.ModuleList()
103
+ self.norms_2 = nn.ModuleList()
104
+ for i in range(n_layers):
105
+ dilation = kernel_size**i
106
+ padding = (kernel_size * dilation - dilation) // 2
107
+ self.convs_sep.append(
108
+ nn.Conv1d(
109
+ channels,
110
+ channels,
111
+ kernel_size,
112
+ groups=channels,
113
+ dilation=dilation,
114
+ padding=padding,
115
+ )
116
+ )
117
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
118
+ self.norms_1.append(LayerNorm(channels))
119
+ self.norms_2.append(LayerNorm(channels))
120
+
121
+ def forward(self, x, x_mask, g=None):
122
+ if g is not None:
123
+ x = x + g
124
+ for i in range(self.n_layers):
125
+ y = self.convs_sep[i](x * x_mask)
126
+ y = self.norms_1[i](y)
127
+ y = F.gelu(y)
128
+ y = self.convs_1x1[i](y)
129
+ y = self.norms_2[i](y)
130
+ y = F.gelu(y)
131
+ y = self.drop(y)
132
+ x = x + y
133
+ return x * x_mask
134
+
135
+
136
+ class WN(torch.nn.Module):
137
+ def __init__(
138
+ self,
139
+ hidden_channels,
140
+ kernel_size,
141
+ dilation_rate,
142
+ n_layers,
143
+ gin_channels=0,
144
+ p_dropout=0,
145
+ ):
146
+ super(WN, self).__init__()
147
+ assert kernel_size % 2 == 1
148
+ self.hidden_channels = hidden_channels
149
+ self.kernel_size = (kernel_size,)
150
+ self.dilation_rate = dilation_rate
151
+ self.n_layers = n_layers
152
+ self.gin_channels = gin_channels
153
+ self.p_dropout = p_dropout
154
+
155
+ self.in_layers = torch.nn.ModuleList()
156
+ self.res_skip_layers = torch.nn.ModuleList()
157
+ self.drop = nn.Dropout(p_dropout)
158
+
159
+ if gin_channels != 0:
160
+ cond_layer = torch.nn.Conv1d(
161
+ gin_channels, 2 * hidden_channels * n_layers, 1
162
+ )
163
+ self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
164
+
165
+ for i in range(n_layers):
166
+ dilation = dilation_rate**i
167
+ padding = int((kernel_size * dilation - dilation) / 2)
168
+ in_layer = torch.nn.Conv1d(
169
+ hidden_channels,
170
+ 2 * hidden_channels,
171
+ kernel_size,
172
+ dilation=dilation,
173
+ padding=padding,
174
+ )
175
+ in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
176
+ self.in_layers.append(in_layer)
177
+
178
+ # last one is not necessary
179
+ if i < n_layers - 1:
180
+ res_skip_channels = 2 * hidden_channels
181
+ else:
182
+ res_skip_channels = hidden_channels
183
+
184
+ res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
185
+ res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
186
+ self.res_skip_layers.append(res_skip_layer)
187
+
188
+ def forward(self, x, x_mask, g=None, **kwargs):
189
+ output = torch.zeros_like(x)
190
+ n_channels_tensor = torch.IntTensor([self.hidden_channels])
191
+
192
+ if g is not None:
193
+ g = self.cond_layer(g)
194
+
195
+ for i in range(self.n_layers):
196
+ x_in = self.in_layers[i](x)
197
+ if g is not None:
198
+ cond_offset = i * 2 * self.hidden_channels
199
+ g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
200
+ else:
201
+ g_l = torch.zeros_like(x_in)
202
+
203
+ acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
204
+ acts = self.drop(acts)
205
+
206
+ res_skip_acts = self.res_skip_layers[i](acts)
207
+ if i < self.n_layers - 1:
208
+ res_acts = res_skip_acts[:, : self.hidden_channels, :]
209
+ x = (x + res_acts) * x_mask
210
+ output = output + res_skip_acts[:, self.hidden_channels :, :]
211
+ else:
212
+ output = output + res_skip_acts
213
+ return output * x_mask
214
+
215
+ def remove_weight_norm(self):
216
+ if self.gin_channels != 0:
217
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
218
+ for l in self.in_layers:
219
+ torch.nn.utils.remove_weight_norm(l)
220
+ for l in self.res_skip_layers:
221
+ torch.nn.utils.remove_weight_norm(l)
222
+
223
+
224
+ class ResBlock1(torch.nn.Module):
225
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
226
+ super(ResBlock1, self).__init__()
227
+ self.convs1 = nn.ModuleList(
228
+ [
229
+ weight_norm(
230
+ Conv1d(
231
+ channels,
232
+ channels,
233
+ kernel_size,
234
+ 1,
235
+ dilation=dilation[0],
236
+ padding=get_padding(kernel_size, dilation[0]),
237
+ )
238
+ ),
239
+ weight_norm(
240
+ Conv1d(
241
+ channels,
242
+ channels,
243
+ kernel_size,
244
+ 1,
245
+ dilation=dilation[1],
246
+ padding=get_padding(kernel_size, dilation[1]),
247
+ )
248
+ ),
249
+ weight_norm(
250
+ Conv1d(
251
+ channels,
252
+ channels,
253
+ kernel_size,
254
+ 1,
255
+ dilation=dilation[2],
256
+ padding=get_padding(kernel_size, dilation[2]),
257
+ )
258
+ ),
259
+ ]
260
+ )
261
+ self.convs1.apply(init_weights)
262
+
263
+ self.convs2 = nn.ModuleList(
264
+ [
265
+ weight_norm(
266
+ Conv1d(
267
+ channels,
268
+ channels,
269
+ kernel_size,
270
+ 1,
271
+ dilation=1,
272
+ padding=get_padding(kernel_size, 1),
273
+ )
274
+ ),
275
+ weight_norm(
276
+ Conv1d(
277
+ channels,
278
+ channels,
279
+ kernel_size,
280
+ 1,
281
+ dilation=1,
282
+ padding=get_padding(kernel_size, 1),
283
+ )
284
+ ),
285
+ weight_norm(
286
+ Conv1d(
287
+ channels,
288
+ channels,
289
+ kernel_size,
290
+ 1,
291
+ dilation=1,
292
+ padding=get_padding(kernel_size, 1),
293
+ )
294
+ ),
295
+ ]
296
+ )
297
+ self.convs2.apply(init_weights)
298
+
299
+ def forward(self, x, x_mask=None):
300
+ for c1, c2 in zip(self.convs1, self.convs2):
301
+ xt = F.leaky_relu(x, LRELU_SLOPE)
302
+ if x_mask is not None:
303
+ xt = xt * x_mask
304
+ xt = c1(xt)
305
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
306
+ if x_mask is not None:
307
+ xt = xt * x_mask
308
+ xt = c2(xt)
309
+ x = xt + x
310
+ if x_mask is not None:
311
+ x = x * x_mask
312
+ return x
313
+
314
+ def remove_weight_norm(self):
315
+ for l in self.convs1:
316
+ remove_weight_norm(l)
317
+ for l in self.convs2:
318
+ remove_weight_norm(l)
319
+
320
+
321
+ class ResBlock2(torch.nn.Module):
322
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
323
+ super(ResBlock2, self).__init__()
324
+ self.convs = nn.ModuleList(
325
+ [
326
+ weight_norm(
327
+ Conv1d(
328
+ channels,
329
+ channels,
330
+ kernel_size,
331
+ 1,
332
+ dilation=dilation[0],
333
+ padding=get_padding(kernel_size, dilation[0]),
334
+ )
335
+ ),
336
+ weight_norm(
337
+ Conv1d(
338
+ channels,
339
+ channels,
340
+ kernel_size,
341
+ 1,
342
+ dilation=dilation[1],
343
+ padding=get_padding(kernel_size, dilation[1]),
344
+ )
345
+ ),
346
+ ]
347
+ )
348
+ self.convs.apply(init_weights)
349
+
350
+ def forward(self, x, x_mask=None):
351
+ for c in self.convs:
352
+ xt = F.leaky_relu(x, LRELU_SLOPE)
353
+ if x_mask is not None:
354
+ xt = xt * x_mask
355
+ xt = c(xt)
356
+ x = xt + x
357
+ if x_mask is not None:
358
+ x = x * x_mask
359
+ return x
360
+
361
+ def remove_weight_norm(self):
362
+ for l in self.convs:
363
+ remove_weight_norm(l)
364
+
365
+
366
+ class Log(nn.Module):
367
+ def forward(self, x, x_mask, reverse=False, **kwargs):
368
+ if not reverse:
369
+ y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
370
+ logdet = torch.sum(-y, [1, 2])
371
+ return y, logdet
372
+ else:
373
+ x = torch.exp(x) * x_mask
374
+ return x
375
+
376
+
377
+ class Flip(nn.Module):
378
+ def forward(self, x, *args, reverse=False, **kwargs):
379
+ x = torch.flip(x, [1])
380
+ if not reverse:
381
+ logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
382
+ return x, logdet
383
+ else:
384
+ return x
385
+
386
+
387
+ class ElementwiseAffine(nn.Module):
388
+ def __init__(self, channels):
389
+ super().__init__()
390
+ self.channels = channels
391
+ self.m = nn.Parameter(torch.zeros(channels, 1))
392
+ self.logs = nn.Parameter(torch.zeros(channels, 1))
393
+
394
+ def forward(self, x, x_mask, reverse=False, **kwargs):
395
+ if not reverse:
396
+ y = self.m + torch.exp(self.logs) * x
397
+ y = y * x_mask
398
+ logdet = torch.sum(self.logs * x_mask, [1, 2])
399
+ return y, logdet
400
+ else:
401
+ x = (x - self.m) * torch.exp(-self.logs) * x_mask
402
+ return x
403
+
404
+
405
+ class ResidualCouplingLayer(nn.Module):
406
+ def __init__(
407
+ self,
408
+ channels,
409
+ hidden_channels,
410
+ kernel_size,
411
+ dilation_rate,
412
+ n_layers,
413
+ p_dropout=0,
414
+ gin_channels=0,
415
+ mean_only=False,
416
+ ):
417
+ assert channels % 2 == 0, "channels should be divisible by 2"
418
+ super().__init__()
419
+ self.channels = channels
420
+ self.hidden_channels = hidden_channels
421
+ self.kernel_size = kernel_size
422
+ self.dilation_rate = dilation_rate
423
+ self.n_layers = n_layers
424
+ self.half_channels = channels // 2
425
+ self.mean_only = mean_only
426
+
427
+ self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
428
+ self.enc = WN(
429
+ hidden_channels,
430
+ kernel_size,
431
+ dilation_rate,
432
+ n_layers,
433
+ p_dropout=p_dropout,
434
+ gin_channels=gin_channels,
435
+ )
436
+ self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
437
+ self.post.weight.data.zero_()
438
+ self.post.bias.data.zero_()
439
+
440
+ def forward(self, x, x_mask, g=None, reverse=False):
441
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
442
+ h = self.pre(x0) * x_mask
443
+ h = self.enc(h, x_mask, g=g)
444
+ stats = self.post(h) * x_mask
445
+ if not self.mean_only:
446
+ m, logs = torch.split(stats, [self.half_channels] * 2, 1)
447
+ else:
448
+ m = stats
449
+ logs = torch.zeros_like(m)
450
+
451
+ if not reverse:
452
+ x1 = m + x1 * torch.exp(logs) * x_mask
453
+ x = torch.cat([x0, x1], 1)
454
+ logdet = torch.sum(logs, [1, 2])
455
+ return x, logdet
456
+ else:
457
+ x1 = (x1 - m) * torch.exp(-logs) * x_mask
458
+ x = torch.cat([x0, x1], 1)
459
+ return x
460
+
461
+ def remove_weight_norm(self):
462
+ self.enc.remove_weight_norm()
463
+
464
+
465
+ class ConvFlow(nn.Module):
466
+ def __init__(
467
+ self,
468
+ in_channels,
469
+ filter_channels,
470
+ kernel_size,
471
+ n_layers,
472
+ num_bins=10,
473
+ tail_bound=5.0,
474
+ ):
475
+ super().__init__()
476
+ self.in_channels = in_channels
477
+ self.filter_channels = filter_channels
478
+ self.kernel_size = kernel_size
479
+ self.n_layers = n_layers
480
+ self.num_bins = num_bins
481
+ self.tail_bound = tail_bound
482
+ self.half_channels = in_channels // 2
483
+
484
+ self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
485
+ self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
486
+ self.proj = nn.Conv1d(
487
+ filter_channels, self.half_channels * (num_bins * 3 - 1), 1
488
+ )
489
+ self.proj.weight.data.zero_()
490
+ self.proj.bias.data.zero_()
491
+
492
+ def forward(self, x, x_mask, g=None, reverse=False):
493
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
494
+ h = self.pre(x0)
495
+ h = self.convs(h, x_mask, g=g)
496
+ h = self.proj(h) * x_mask
497
+
498
+ b, c, t = x0.shape
499
+ h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
500
+
501
+ unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
502
+ unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
503
+ self.filter_channels
504
+ )
505
+ unnormalized_derivatives = h[..., 2 * self.num_bins :]
506
+
507
+ x1, logabsdet = piecewise_rational_quadratic_transform(
508
+ x1,
509
+ unnormalized_widths,
510
+ unnormalized_heights,
511
+ unnormalized_derivatives,
512
+ inverse=reverse,
513
+ tails="linear",
514
+ tail_bound=self.tail_bound,
515
+ )
516
+
517
+ x = torch.cat([x0, x1], 1) * x_mask
518
+ logdet = torch.sum(logabsdet * x_mask, [1, 2])
519
+ if not reverse:
520
+ return x, logdet
521
+ else:
522
+ return x
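
A quick invertibility check for the flow layers above: `ResidualCouplingLayer` applied forward and then with `reverse=True` should reproduce its input exactly (a sketch assuming torch and this module on the import path):

import torch
from lib.infer_pack.modules import ResidualCouplingLayer

layer = ResidualCouplingLayer(
    channels=4, hidden_channels=8, kernel_size=3,
    dilation_rate=1, n_layers=2, mean_only=True,
)
x = torch.randn(1, 4, 100)
mask = torch.ones(1, 1, 100)
y, logdet = layer(x, mask)            # forward direction
x_rec = layer(y, mask, reverse=True)  # inverse direction
assert torch.allclose(x, x_rec, atol=1e-5)
# note: self.post is zero-initialized, so the layer starts out as a near-identity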
BanG-Dream-MyGO/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py ADDED
@@ -0,0 +1,90 @@
1
+ from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
2
+ import pyworld
3
+ import numpy as np
4
+
5
+
6
+ class DioF0Predictor(F0Predictor):
7
+ def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
8
+ self.hop_length = hop_length
9
+ self.f0_min = f0_min
10
+ self.f0_max = f0_max
11
+ self.sampling_rate = sampling_rate
12
+
13
+ def interpolate_f0(self, f0):
14
+ """
15
+ Interpolate the F0 contour: fill unvoiced (zero) frames and return a voiced/unvoiced mask.
16
+ """
17
+
18
+ data = np.reshape(f0, (f0.size, 1))
19
+
20
+ vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
21
+ vuv_vector[data > 0.0] = 1.0
22
+ vuv_vector[data <= 0.0] = 0.0
23
+
24
+ ip_data = data
25
+
26
+ frame_number = data.size
27
+ last_value = 0.0
28
+ for i in range(frame_number):
29
+ if data[i] <= 0.0:
30
+ j = i + 1
31
+ for j in range(i + 1, frame_number):
32
+ if data[j] > 0.0:
33
+ break
34
+ if j < frame_number - 1:
35
+ if last_value > 0.0:
36
+ step = (data[j] - data[i - 1]) / float(j - i)
37
+ for k in range(i, j):
38
+ ip_data[k] = data[i - 1] + step * (k - i + 1)
39
+ else:
40
+ for k in range(i, j):
41
+ ip_data[k] = data[j]
42
+ else:
43
+ for k in range(i, frame_number):
44
+ ip_data[k] = last_value
45
+ else:
46
+ ip_data[i] = data[i] # possibly an unnecessary copy here
47
+ last_value = data[i]
48
+
49
+ return ip_data[:, 0], vuv_vector[:, 0]
50
+
51
+ def resize_f0(self, x, target_len):
52
+ source = np.array(x)
53
+ source[source < 0.001] = np.nan
54
+ target = np.interp(
55
+ np.arange(0, len(source) * target_len, len(source)) / target_len,
56
+ np.arange(0, len(source)),
57
+ source,
58
+ )
59
+ res = np.nan_to_num(target)
60
+ return res
61
+
62
+ def compute_f0(self, wav, p_len=None):
63
+ if p_len is None:
64
+ p_len = wav.shape[0] // self.hop_length
65
+ f0, t = pyworld.dio(
66
+ wav.astype(np.double),
67
+ fs=self.sampling_rate,
68
+ f0_floor=self.f0_min,
69
+ f0_ceil=self.f0_max,
70
+ frame_period=1000 * self.hop_length / self.sampling_rate,
71
+ )
72
+ f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
73
+ for index, pitch in enumerate(f0):
74
+ f0[index] = round(pitch, 1)
75
+ return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
76
+
77
+ def compute_f0_uv(self, wav, p_len=None):
78
+ if p_len is None:
79
+ p_len = wav.shape[0] // self.hop_length
80
+ f0, t = pyworld.dio(
81
+ wav.astype(np.double),
82
+ fs=self.sampling_rate,
83
+ f0_floor=self.f0_min,
84
+ f0_ceil=self.f0_max,
85
+ frame_period=1000 * self.hop_length / self.sampling_rate,
86
+ )
87
+ f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
88
+ for index, pitch in enumerate(f0):
89
+ f0[index] = round(pitch, 1)
90
+ return self.interpolate_f0(self.resize_f0(f0, p_len))
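
A usage sketch for the predictor above on a synthetic tone, assuming pyworld is installed; the median F0 of the voiced frames should land near the tone's 220 Hz:

import numpy as np
from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor

sr = 16000
t = np.arange(sr) / sr
wav = 0.5 * np.sin(2 * np.pi * 220.0 * t)   # one second of a 220 Hz tone
predictor = DioF0Predictor(hop_length=160, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)       # 16000 // 160 = 100 frames
print(f0.shape, float(np.median(f0[uv > 0.5])))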
BanG-Dream-MyGO/lib/infer_pack/modules/F0Predictor/F0Predictor.py ADDED
@@ -0,0 +1,16 @@
1
+ class F0Predictor(object):
2
+ def compute_f0(self, wav, p_len):
3
+ """
4
+ input: wav:[signal_length]
5
+ p_len:int
6
+ output: f0:[signal_length//hop_length]
7
+ """
8
+ pass
9
+
10
+ def compute_f0_uv(self, wav, p_len):
11
+ """
12
+ input: wav:[signal_length]
13
+ p_len:int
14
+ output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
15
+ """
16
+ pass
BanG-Dream-MyGO/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py ADDED
@@ -0,0 +1,86 @@
1
+ from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
2
+ import pyworld
3
+ import numpy as np
4
+
5
+
6
+ class HarvestF0Predictor(F0Predictor):
7
+ def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
8
+ self.hop_length = hop_length
9
+ self.f0_min = f0_min
10
+ self.f0_max = f0_max
11
+ self.sampling_rate = sampling_rate
12
+
13
+ def interpolate_f0(self, f0):
14
+ """
15
+ Interpolate the F0 contour: fill unvoiced (zero) frames and return a voiced/unvoiced mask.
16
+ """
17
+
18
+ data = np.reshape(f0, (f0.size, 1))
19
+
20
+ vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
21
+ vuv_vector[data > 0.0] = 1.0
22
+ vuv_vector[data <= 0.0] = 0.0
23
+
24
+ ip_data = data
25
+
26
+ frame_number = data.size
27
+ last_value = 0.0
28
+ for i in range(frame_number):
29
+ if data[i] <= 0.0:
30
+ j = i + 1
31
+ for j in range(i + 1, frame_number):
32
+ if data[j] > 0.0:
33
+ break
34
+ if j < frame_number - 1:
35
+ if last_value > 0.0:
36
+ step = (data[j] - data[i - 1]) / float(j - i)
37
+ for k in range(i, j):
38
+ ip_data[k] = data[i - 1] + step * (k - i + 1)
39
+ else:
40
+ for k in range(i, j):
41
+ ip_data[k] = data[j]
42
+ else:
43
+ for k in range(i, frame_number):
44
+ ip_data[k] = last_value
45
+ else:
46
+ ip_data[i] = data[i] # possibly an unnecessary copy here
47
+ last_value = data[i]
48
+
49
+ return ip_data[:, 0], vuv_vector[:, 0]
50
+
51
+ def resize_f0(self, x, target_len):
52
+ source = np.array(x)
53
+ source[source < 0.001] = np.nan
54
+ target = np.interp(
55
+ np.arange(0, len(source) * target_len, len(source)) / target_len,
56
+ np.arange(0, len(source)),
57
+ source,
58
+ )
59
+ res = np.nan_to_num(target)
60
+ return res
61
+
62
+ def compute_f0(self, wav, p_len=None):
63
+ if p_len is None:
64
+ p_len = wav.shape[0] // self.hop_length
65
+ f0, t = pyworld.harvest(
66
+ wav.astype(np.double),
67
+ fs=self.sampling_rate,
68
+ f0_ceil=self.f0_max,
69
+ f0_floor=self.f0_min,
70
+ frame_period=1000 * self.hop_length / self.sampling_rate,
71
+ )
72
+ f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
73
+ return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
74
+
75
+ def compute_f0_uv(self, wav, p_len=None):
76
+ if p_len is None:
77
+ p_len = wav.shape[0] // self.hop_length
78
+ f0, t = pyworld.harvest(
79
+ wav.astype(np.double),
80
+ fs=self.sampling_rate,
81
+ f0_floor=self.f0_min,
82
+ f0_ceil=self.f0_max,
83
+ frame_period=1000 * self.hop_length / self.sampling_rate,
84
+ )
85
+ f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
86
+ return self.interpolate_f0(self.resize_f0(f0, p_len))
BanG-Dream-MyGO/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py ADDED
@@ -0,0 +1,97 @@
1
+ from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
2
+ import parselmouth
3
+ import numpy as np
4
+
5
+
6
+ class PMF0Predictor(F0Predictor):
7
+ def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
8
+ self.hop_length = hop_length
9
+ self.f0_min = f0_min
10
+ self.f0_max = f0_max
11
+ self.sampling_rate = sampling_rate
12
+
13
+ def interpolate_f0(self, f0):
14
+ """
15
+ Interpolate the F0 contour: fill unvoiced (zero) frames and return a voiced/unvoiced mask.
16
+ """
17
+
18
+ data = np.reshape(f0, (f0.size, 1))
19
+
20
+ vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
21
+ vuv_vector[data > 0.0] = 1.0
22
+ vuv_vector[data <= 0.0] = 0.0
23
+
24
+ ip_data = data
25
+
26
+ frame_number = data.size
27
+ last_value = 0.0
28
+ for i in range(frame_number):
29
+ if data[i] <= 0.0:
30
+ j = i + 1
31
+ for j in range(i + 1, frame_number):
32
+ if data[j] > 0.0:
33
+ break
34
+ if j < frame_number - 1:
35
+ if last_value > 0.0:
36
+ step = (data[j] - data[i - 1]) / float(j - i)
37
+ for k in range(i, j):
38
+ ip_data[k] = data[i - 1] + step * (k - i + 1)
39
+ else:
40
+ for k in range(i, j):
41
+ ip_data[k] = data[j]
42
+ else:
43
+ for k in range(i, frame_number):
44
+ ip_data[k] = last_value
45
+ else:
46
+ ip_data[i] = data[i] # possibly an unnecessary copy here
47
+ last_value = data[i]
48
+
49
+ return ip_data[:, 0], vuv_vector[:, 0]
50
+
51
+ def compute_f0(self, wav, p_len=None):
52
+ x = wav
53
+ if p_len is None:
54
+ p_len = x.shape[0] // self.hop_length
55
+ else:
56
+ assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
57
+ time_step = self.hop_length / self.sampling_rate * 1000
58
+ f0 = (
59
+ parselmouth.Sound(x, self.sampling_rate)
60
+ .to_pitch_ac(
61
+ time_step=time_step / 1000,
62
+ voicing_threshold=0.6,
63
+ pitch_floor=self.f0_min,
64
+ pitch_ceiling=self.f0_max,
65
+ )
66
+ .selected_array["frequency"]
67
+ )
68
+
69
+ pad_size = (p_len - len(f0) + 1) // 2
70
+ if pad_size > 0 or p_len - len(f0) - pad_size > 0:
71
+ f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
72
+ f0, uv = self.interpolate_f0(f0)
73
+ return f0
74
+
75
+ def compute_f0_uv(self, wav, p_len=None):
76
+ x = wav
77
+ if p_len is None:
78
+ p_len = x.shape[0] // self.hop_length
79
+ else:
80
+ assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
81
+ time_step = self.hop_length / self.sampling_rate * 1000
82
+ f0 = (
83
+ parselmouth.Sound(x, self.sampling_rate)
84
+ .to_pitch_ac(
85
+ time_step=time_step / 1000,
86
+ voicing_threshold=0.6,
87
+ pitch_floor=self.f0_min,
88
+ pitch_ceiling=self.f0_max,
89
+ )
90
+ .selected_array["frequency"]
91
+ )
92
+
93
+ pad_size = (p_len - len(f0) + 1) // 2
94
+ if pad_size > 0 or p_len - len(f0) - pad_size > 0:
95
+ f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
96
+ f0, uv = self.interpolate_f0(f0)
97
+ return f0, uv
BanG-Dream-MyGO/lib/infer_pack/modules/F0Predictor/__init__.py ADDED
File without changes
BanG-Dream-MyGO/lib/infer_pack/onnx_inference.py ADDED
@@ -0,0 +1,145 @@
1
+ import onnxruntime
2
+ import librosa
3
+ import numpy as np
4
+ import soundfile
5
+
6
+
7
+ class ContentVec:
8
+ def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
9
+ print("load model(s) from {}".format(vec_path))
10
+ if device == "cpu" or device is None:
11
+ providers = ["CPUExecutionProvider"]
12
+ elif device == "cuda":
13
+ providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
14
+ elif device == "dml":
15
+ providers = ["DmlExecutionProvider"]
16
+ else:
17
+ raise RuntimeError("Unsportted Device")
18
+ self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
19
+
20
+ def __call__(self, wav):
21
+ return self.forward(wav)
22
+
23
+ def forward(self, wav):
24
+ feats = wav
25
+ if feats.ndim == 2: # double channels
26
+ feats = feats.mean(-1)
27
+ assert feats.ndim == 1, feats.ndim
28
+ feats = np.expand_dims(np.expand_dims(feats, 0), 0)
29
+ onnx_input = {self.model.get_inputs()[0].name: feats}
30
+ logits = self.model.run(None, onnx_input)[0]
31
+ return logits.transpose(0, 2, 1)
32
+
33
+
34
+ def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
35
+ if f0_predictor == "pm":
36
+ from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
37
+
38
+ f0_predictor_object = PMF0Predictor(
39
+ hop_length=hop_length, sampling_rate=sampling_rate
40
+ )
41
+ elif f0_predictor == "harvest":
42
+ from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
43
+ HarvestF0Predictor,
44
+ )
45
+
46
+ f0_predictor_object = HarvestF0Predictor(
47
+ hop_length=hop_length, sampling_rate=sampling_rate
48
+ )
49
+ elif f0_predictor == "dio":
50
+ from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
51
+
52
+ f0_predictor_object = DioF0Predictor(
53
+ hop_length=hop_length, sampling_rate=sampling_rate
54
+ )
55
+ else:
56
+ raise Exception("Unknown f0 predictor")
57
+ return f0_predictor_object
58
+
59
+
60
+ class OnnxRVC:
61
+ def __init__(
62
+ self,
63
+ model_path,
64
+ sr=40000,
65
+ hop_size=512,
66
+ vec_path="vec-768-layer-12",
67
+ device="cpu",
68
+ ):
69
+ vec_path = f"pretrained/{vec_path}.onnx"
70
+ self.vec_model = ContentVec(vec_path, device)
71
+ if device == "cpu" or device is None:
72
+ providers = ["CPUExecutionProvider"]
73
+ elif device == "cuda":
74
+ providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
75
+ elif device == "dml":
76
+ providers = ["DmlExecutionProvider"]
77
+ else:
78
+ raise RuntimeError("Unsportted Device")
79
+ self.model = onnxruntime.InferenceSession(model_path, providers=providers)
80
+ self.sampling_rate = sr
81
+ self.hop_size = hop_size
82
+
83
+ def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
84
+ onnx_input = {
85
+ self.model.get_inputs()[0].name: hubert,
86
+ self.model.get_inputs()[1].name: hubert_length,
87
+ self.model.get_inputs()[2].name: pitch,
88
+ self.model.get_inputs()[3].name: pitchf,
89
+ self.model.get_inputs()[4].name: ds,
90
+ self.model.get_inputs()[5].name: rnd,
91
+ }
92
+ return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
93
+
94
+ def inference(
95
+ self,
96
+ raw_path,
97
+ sid,
98
+ f0_method="dio",
99
+ f0_up_key=0,
100
+ pad_time=0.5,
101
+ cr_threshold=0.02,
102
+ ):
103
+ f0_min = 50
104
+ f0_max = 1100
105
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
106
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
107
+ f0_predictor = get_f0_predictor(
108
+ f0_method,
109
+ hop_length=self.hop_size,
110
+ sampling_rate=self.sampling_rate,
111
+ threshold=cr_threshold,
112
+ )
113
+ wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
114
+ org_length = len(wav)
115
+ if org_length / sr > 50.0:
116
+ raise RuntimeError("Reached Max Length")
117
+
118
+ wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
119
+ wav16k = wav16k
120
+
121
+ hubert = self.vec_model(wav16k)
122
+ hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
123
+ hubert_length = hubert.shape[1]
124
+
125
+ pitchf = f0_predictor.compute_f0(wav, hubert_length)
126
+ pitchf = pitchf * 2 ** (f0_up_key / 12)
127
+ pitch = pitchf.copy()
128
+ f0_mel = 1127 * np.log(1 + pitch / 700)
129
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
130
+ f0_mel_max - f0_mel_min
131
+ ) + 1
132
+ f0_mel[f0_mel <= 1] = 1
133
+ f0_mel[f0_mel > 255] = 255
134
+ pitch = np.rint(f0_mel).astype(np.int64)
135
+
136
+ pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
137
+ pitch = pitch.reshape(1, len(pitch))
138
+ ds = np.array([sid]).astype(np.int64)
139
+
140
+ rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
141
+ hubert_length = np.array([hubert_length]).astype(np.int64)
142
+
143
+ out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
144
+ out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
145
+ return out_wav[0:org_length]
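
A hypothetical end-to-end call for `OnnxRVC`; the model path, input file, and the ContentVec export under `pretrained/` are placeholders for artifacts you must supply yourself:

import soundfile
from lib.infer_pack.onnx_inference import OnnxRVC

model = OnnxRVC(
    "my_model.onnx",               # hypothetical exported RVC graph
    sr=40000,
    hop_size=512,
    vec_path="vec-768-layer-12",   # expects pretrained/vec-768-layer-12.onnx
    device="cpu",
)
out_wav = model.inference("input.wav", sid=0, f0_method="dio", f0_up_key=0)
soundfile.write("output.wav", out_wav, 40000)  # inference() returns int16 samples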
BanG-Dream-MyGO/lib/infer_pack/transforms.py ADDED
@@ -0,0 +1,209 @@
1
+ import torch
2
+ from torch.nn import functional as F
3
+
4
+ import numpy as np
5
+
6
+
7
+ DEFAULT_MIN_BIN_WIDTH = 1e-3
8
+ DEFAULT_MIN_BIN_HEIGHT = 1e-3
9
+ DEFAULT_MIN_DERIVATIVE = 1e-3
10
+
11
+
12
+ def piecewise_rational_quadratic_transform(
13
+ inputs,
14
+ unnormalized_widths,
15
+ unnormalized_heights,
16
+ unnormalized_derivatives,
17
+ inverse=False,
18
+ tails=None,
19
+ tail_bound=1.0,
20
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
21
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
22
+ min_derivative=DEFAULT_MIN_DERIVATIVE,
23
+ ):
24
+ if tails is None:
25
+ spline_fn = rational_quadratic_spline
26
+ spline_kwargs = {}
27
+ else:
28
+ spline_fn = unconstrained_rational_quadratic_spline
29
+ spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
30
+
31
+ outputs, logabsdet = spline_fn(
32
+ inputs=inputs,
33
+ unnormalized_widths=unnormalized_widths,
34
+ unnormalized_heights=unnormalized_heights,
35
+ unnormalized_derivatives=unnormalized_derivatives,
36
+ inverse=inverse,
37
+ min_bin_width=min_bin_width,
38
+ min_bin_height=min_bin_height,
39
+ min_derivative=min_derivative,
40
+ **spline_kwargs
41
+ )
42
+ return outputs, logabsdet
43
+
44
+
45
+ def searchsorted(bin_locations, inputs, eps=1e-6):
46
+ bin_locations[..., -1] += eps
47
+ return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
48
+
49
+
50
+ def unconstrained_rational_quadratic_spline(
51
+ inputs,
52
+ unnormalized_widths,
53
+ unnormalized_heights,
54
+ unnormalized_derivatives,
55
+ inverse=False,
56
+ tails="linear",
57
+ tail_bound=1.0,
58
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
59
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
60
+ min_derivative=DEFAULT_MIN_DERIVATIVE,
61
+ ):
62
+ inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
63
+ outside_interval_mask = ~inside_interval_mask
64
+
65
+ outputs = torch.zeros_like(inputs)
66
+ logabsdet = torch.zeros_like(inputs)
67
+
68
+ if tails == "linear":
69
+ unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
70
+ constant = np.log(np.exp(1 - min_derivative) - 1)
71
+ unnormalized_derivatives[..., 0] = constant
72
+ unnormalized_derivatives[..., -1] = constant
73
+
74
+ outputs[outside_interval_mask] = inputs[outside_interval_mask]
75
+ logabsdet[outside_interval_mask] = 0
76
+ else:
77
+ raise RuntimeError("{} tails are not implemented.".format(tails))
78
+
79
+ (
80
+ outputs[inside_interval_mask],
81
+ logabsdet[inside_interval_mask],
82
+ ) = rational_quadratic_spline(
83
+ inputs=inputs[inside_interval_mask],
84
+ unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
85
+ unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
86
+ unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
87
+ inverse=inverse,
88
+ left=-tail_bound,
89
+ right=tail_bound,
90
+ bottom=-tail_bound,
91
+ top=tail_bound,
92
+ min_bin_width=min_bin_width,
93
+ min_bin_height=min_bin_height,
94
+ min_derivative=min_derivative,
95
+ )
96
+
97
+ return outputs, logabsdet
98
+
99
+
100
+ def rational_quadratic_spline(
101
+ inputs,
102
+ unnormalized_widths,
103
+ unnormalized_heights,
104
+ unnormalized_derivatives,
105
+ inverse=False,
106
+ left=0.0,
107
+ right=1.0,
108
+ bottom=0.0,
109
+ top=1.0,
110
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
111
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
112
+ min_derivative=DEFAULT_MIN_DERIVATIVE,
113
+ ):
114
+ if torch.min(inputs) < left or torch.max(inputs) > right:
115
+ raise ValueError("Input to a transform is not within its domain")
116
+
117
+ num_bins = unnormalized_widths.shape[-1]
118
+
119
+ if min_bin_width * num_bins > 1.0:
120
+ raise ValueError("Minimal bin width too large for the number of bins")
121
+ if min_bin_height * num_bins > 1.0:
122
+ raise ValueError("Minimal bin height too large for the number of bins")
123
+
124
+ widths = F.softmax(unnormalized_widths, dim=-1)
125
+ widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
126
+ cumwidths = torch.cumsum(widths, dim=-1)
127
+ cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
128
+ cumwidths = (right - left) * cumwidths + left
129
+ cumwidths[..., 0] = left
130
+ cumwidths[..., -1] = right
131
+ widths = cumwidths[..., 1:] - cumwidths[..., :-1]
132
+
133
+ derivatives = min_derivative + F.softplus(unnormalized_derivatives)
134
+
135
+ heights = F.softmax(unnormalized_heights, dim=-1)
136
+ heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
137
+ cumheights = torch.cumsum(heights, dim=-1)
138
+ cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
139
+ cumheights = (top - bottom) * cumheights + bottom
140
+ cumheights[..., 0] = bottom
141
+ cumheights[..., -1] = top
142
+ heights = cumheights[..., 1:] - cumheights[..., :-1]
143
+
144
+ if inverse:
145
+ bin_idx = searchsorted(cumheights, inputs)[..., None]
146
+ else:
147
+ bin_idx = searchsorted(cumwidths, inputs)[..., None]
148
+
149
+ input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
150
+ input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
151
+
152
+ input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
153
+ delta = heights / widths
154
+ input_delta = delta.gather(-1, bin_idx)[..., 0]
155
+
156
+ input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
157
+ input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
158
+
159
+ input_heights = heights.gather(-1, bin_idx)[..., 0]
160
+
161
+ if inverse:
162
+ a = (inputs - input_cumheights) * (
163
+ input_derivatives + input_derivatives_plus_one - 2 * input_delta
164
+ ) + input_heights * (input_delta - input_derivatives)
165
+ b = input_heights * input_derivatives - (inputs - input_cumheights) * (
166
+ input_derivatives + input_derivatives_plus_one - 2 * input_delta
167
+ )
168
+ c = -input_delta * (inputs - input_cumheights)
169
+
170
+ discriminant = b.pow(2) - 4 * a * c
171
+ assert (discriminant >= 0).all()
172
+
173
+ root = (2 * c) / (-b - torch.sqrt(discriminant))
174
+ outputs = root * input_bin_widths + input_cumwidths
175
+
176
+ theta_one_minus_theta = root * (1 - root)
177
+ denominator = input_delta + (
178
+ (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
179
+ * theta_one_minus_theta
180
+ )
181
+ derivative_numerator = input_delta.pow(2) * (
182
+ input_derivatives_plus_one * root.pow(2)
183
+ + 2 * input_delta * theta_one_minus_theta
184
+ + input_derivatives * (1 - root).pow(2)
185
+ )
186
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
187
+
188
+ return outputs, -logabsdet
189
+ else:
190
+ theta = (inputs - input_cumwidths) / input_bin_widths
191
+ theta_one_minus_theta = theta * (1 - theta)
192
+
193
+ numerator = input_heights * (
194
+ input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
195
+ )
196
+ denominator = input_delta + (
197
+ (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
198
+ * theta_one_minus_theta
199
+ )
200
+ outputs = input_cumheights + numerator / denominator
201
+
202
+ derivative_numerator = input_delta.pow(2) * (
203
+ input_derivatives_plus_one * theta.pow(2)
204
+ + 2 * input_delta * theta_one_minus_theta
205
+ + input_derivatives * (1 - theta).pow(2)
206
+ )
207
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
208
+
209
+ return outputs, logabsdet
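
The spline above is a bijection on [-tail_bound, tail_bound] with identity tails; a sketch verifying the roundtrip and that the forward and inverse log-determinants cancel (shapes follow ConvFlow: num_bins widths and heights, num_bins - 1 derivatives):

import torch
from lib.infer_pack.transforms import piecewise_rational_quadratic_transform

torch.manual_seed(0)
n, num_bins, tail_bound = 6, 10, 5.0
x = torch.empty(n).uniform_(-4.0, 4.0)
w = torch.randn(n, num_bins)
h = torch.randn(n, num_bins)
d = torch.randn(n, num_bins - 1)  # "linear" tails pad this to num_bins + 1

y, logdet = piecewise_rational_quadratic_transform(
    x, w, h, d, inverse=False, tails="linear", tail_bound=tail_bound)
x_rec, logdet_inv = piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails="linear", tail_bound=tail_bound)
assert torch.allclose(x, x_rec, atol=1e-4)           # bijective on the interval
assert torch.allclose(logdet, -logdet_inv, atol=1e-4)  # log-dets cancel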
BanG-Dream-MyGO/requirements.txt ADDED
@@ -0,0 +1,22 @@
1
+ wheel
2
+ setuptools
3
+ ffmpeg
4
+ torch
5
+ numba==0.56.4
6
+ numpy==1.23.5
7
+ scipy==1.9.3
8
+ librosa==0.9.1
9
+ fairseq==0.12.2
10
+ faiss-cpu==1.7.3
11
+ gradio==3.50.2
12
+ pyworld>=0.3.2
13
+ soundfile>=0.12.1
14
+ praat-parselmouth>=0.4.2
15
+ httpx
16
+ tensorboard
17
+ tensorboardX
18
+ torchcrepe
19
+ onnxruntime
20
+ demucs
21
+ edge-tts
22
+ yt_dlp
BanG-Dream-MyGO/rmvpe.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5ed4719f59085d1affc5d81354c70828c740584f2d24e782523345a6a278962
3
+ size 181189687
BanG-Dream-MyGO/rmvpe.py ADDED
@@ -0,0 +1,432 @@
1
+ import sys, torch, numpy as np, traceback, pdb
2
+ import torch.nn as nn
3
+ from time import time as ttime
4
+ import torch.nn.functional as F
5
+
6
+
7
+ class BiGRU(nn.Module):
8
+ def __init__(self, input_features, hidden_features, num_layers):
9
+ super(BiGRU, self).__init__()
10
+ self.gru = nn.GRU(
11
+ input_features,
12
+ hidden_features,
13
+ num_layers=num_layers,
14
+ batch_first=True,
15
+ bidirectional=True,
16
+ )
17
+
18
+ def forward(self, x):
19
+ return self.gru(x)[0]
20
+
21
+
22
+ class ConvBlockRes(nn.Module):
23
+ def __init__(self, in_channels, out_channels, momentum=0.01):
24
+ super(ConvBlockRes, self).__init__()
25
+ self.conv = nn.Sequential(
26
+ nn.Conv2d(
27
+ in_channels=in_channels,
28
+ out_channels=out_channels,
29
+ kernel_size=(3, 3),
30
+ stride=(1, 1),
31
+ padding=(1, 1),
32
+ bias=False,
33
+ ),
34
+ nn.BatchNorm2d(out_channels, momentum=momentum),
35
+ nn.ReLU(),
36
+ nn.Conv2d(
37
+ in_channels=out_channels,
38
+ out_channels=out_channels,
39
+ kernel_size=(3, 3),
40
+ stride=(1, 1),
41
+ padding=(1, 1),
42
+ bias=False,
43
+ ),
44
+ nn.BatchNorm2d(out_channels, momentum=momentum),
45
+ nn.ReLU(),
46
+ )
47
+ if in_channels != out_channels:
48
+ self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
49
+ self.is_shortcut = True
50
+ else:
51
+ self.is_shortcut = False
52
+
53
+ def forward(self, x):
54
+ if self.is_shortcut:
55
+ return self.conv(x) + self.shortcut(x)
56
+ else:
57
+ return self.conv(x) + x
58
+
59
+
60
+ class Encoder(nn.Module):
61
+ def __init__(
62
+ self,
63
+ in_channels,
64
+ in_size,
65
+ n_encoders,
66
+ kernel_size,
67
+ n_blocks,
68
+ out_channels=16,
69
+ momentum=0.01,
70
+ ):
71
+ super(Encoder, self).__init__()
72
+ self.n_encoders = n_encoders
73
+ self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
74
+ self.layers = nn.ModuleList()
75
+ self.latent_channels = []
76
+ for i in range(self.n_encoders):
77
+ self.layers.append(
78
+ ResEncoderBlock(
79
+ in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
80
+ )
81
+ )
82
+ self.latent_channels.append([out_channels, in_size])
83
+ in_channels = out_channels
84
+ out_channels *= 2
85
+ in_size //= 2
86
+ self.out_size = in_size
87
+ self.out_channel = out_channels
88
+
89
+ def forward(self, x):
90
+ concat_tensors = []
91
+ x = self.bn(x)
92
+ for i in range(self.n_encoders):
93
+ _, x = self.layers[i](x)
94
+ concat_tensors.append(_)
95
+ return x, concat_tensors
96
+
97
+
98
+ class ResEncoderBlock(nn.Module):
99
+ def __init__(
100
+ self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
101
+ ):
102
+ super(ResEncoderBlock, self).__init__()
103
+ self.n_blocks = n_blocks
104
+ self.conv = nn.ModuleList()
105
+ self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
106
+ for i in range(n_blocks - 1):
107
+ self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
108
+ self.kernel_size = kernel_size
109
+ if self.kernel_size is not None:
110
+ self.pool = nn.AvgPool2d(kernel_size=kernel_size)
111
+
112
+ def forward(self, x):
113
+ for i in range(self.n_blocks):
114
+ x = self.conv[i](x)
115
+ if self.kernel_size is not None:
116
+ return x, self.pool(x)
117
+ else:
118
+ return x
119
+
120
+
121
+ class Intermediate(nn.Module): #
122
+ def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
123
+ super(Intermediate, self).__init__()
124
+ self.n_inters = n_inters
125
+ self.layers = nn.ModuleList()
126
+ self.layers.append(
127
+ ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
128
+ )
129
+ for i in range(self.n_inters - 1):
130
+ self.layers.append(
131
+ ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
132
+ )
133
+
134
+ def forward(self, x):
135
+ for i in range(self.n_inters):
136
+ x = self.layers[i](x)
137
+ return x
138
+
139
+
140
+ class ResDecoderBlock(nn.Module):
141
+ def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
142
+ super(ResDecoderBlock, self).__init__()
143
+ out_padding = (0, 1) if stride == (1, 2) else (1, 1)
144
+ self.n_blocks = n_blocks
145
+ self.conv1 = nn.Sequential(
146
+ nn.ConvTranspose2d(
147
+ in_channels=in_channels,
148
+ out_channels=out_channels,
149
+ kernel_size=(3, 3),
150
+ stride=stride,
151
+ padding=(1, 1),
152
+ output_padding=out_padding,
153
+ bias=False,
154
+ ),
155
+ nn.BatchNorm2d(out_channels, momentum=momentum),
156
+ nn.ReLU(),
157
+ )
158
+ self.conv2 = nn.ModuleList()
159
+ self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
160
+ for i in range(n_blocks - 1):
161
+ self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
162
+
163
+ def forward(self, x, concat_tensor):
164
+ x = self.conv1(x)
165
+ x = torch.cat((x, concat_tensor), dim=1)
166
+ for i in range(self.n_blocks):
167
+ x = self.conv2[i](x)
168
+ return x
169
+
170
+
171
+ class Decoder(nn.Module):
172
+ def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
173
+ super(Decoder, self).__init__()
174
+ self.layers = nn.ModuleList()
175
+ self.n_decoders = n_decoders
176
+ for i in range(self.n_decoders):
177
+ out_channels = in_channels // 2
178
+ self.layers.append(
179
+ ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
180
+ )
181
+ in_channels = out_channels
182
+
183
+ def forward(self, x, concat_tensors):
184
+ for i in range(self.n_decoders):
185
+ x = self.layers[i](x, concat_tensors[-1 - i])
186
+ return x
187
+
188
+
189
+ class DeepUnet(nn.Module):
190
+ def __init__(
191
+ self,
192
+ kernel_size,
193
+ n_blocks,
194
+ en_de_layers=5,
195
+ inter_layers=4,
196
+ in_channels=1,
197
+ en_out_channels=16,
198
+ ):
199
+ super(DeepUnet, self).__init__()
200
+ self.encoder = Encoder(
201
+ in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
202
+ )
203
+ self.intermediate = Intermediate(
204
+ self.encoder.out_channel // 2,
205
+ self.encoder.out_channel,
206
+ inter_layers,
207
+ n_blocks,
208
+ )
209
+ self.decoder = Decoder(
210
+ self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
211
+ )
212
+
213
+ def forward(self, x):
214
+ x, concat_tensors = self.encoder(x)
215
+ x = self.intermediate(x)
216
+ x = self.decoder(x, concat_tensors)
217
+ return x
218
+
219
+
220
+ class E2E(nn.Module):
221
+ def __init__(
222
+ self,
223
+ n_blocks,
224
+ n_gru,
225
+ kernel_size,
226
+ en_de_layers=5,
227
+ inter_layers=4,
228
+ in_channels=1,
229
+ en_out_channels=16,
230
+ ):
231
+ super(E2E, self).__init__()
232
+ self.unet = DeepUnet(
233
+ kernel_size,
234
+ n_blocks,
235
+ en_de_layers,
236
+ inter_layers,
237
+ in_channels,
238
+ en_out_channels,
239
+ )
240
+ self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
241
+ if n_gru:
242
+ self.fc = nn.Sequential(
243
+ BiGRU(3 * 128, 256, n_gru),
244
+ nn.Linear(512, 360),
245
+ nn.Dropout(0.25),
246
+ nn.Sigmoid(),
247
+ )
248
+ else:
249
+ self.fc = nn.Sequential(
250
+ nn.Linear(3 * 128, 360), nn.Dropout(0.25), nn.Sigmoid()  # 128 mels, 360 pitch bins, as in the GRU branch
251
+ )
252
+
253
+ def forward(self, mel):
254
+ mel = mel.transpose(-1, -2).unsqueeze(1)
255
+ x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
256
+ x = self.fc(x)
257
+ return x
258
+
259
+
260
+ from librosa.filters import mel
261
+
262
+
263
+ class MelSpectrogram(torch.nn.Module):
264
+ def __init__(
265
+ self,
266
+ is_half,
267
+ n_mel_channels,
268
+ sampling_rate,
269
+ win_length,
270
+ hop_length,
271
+ n_fft=None,
272
+ mel_fmin=0,
273
+ mel_fmax=None,
274
+ clamp=1e-5,
275
+ ):
276
+ super().__init__()
277
+ n_fft = win_length if n_fft is None else n_fft
278
+ self.hann_window = {}
279
+ mel_basis = mel(
280
+ sr=sampling_rate,
281
+ n_fft=n_fft,
282
+ n_mels=n_mel_channels,
283
+ fmin=mel_fmin,
284
+ fmax=mel_fmax,
285
+ htk=True,
286
+ )
287
+ mel_basis = torch.from_numpy(mel_basis).float()
288
+ self.register_buffer("mel_basis", mel_basis)
289
+ self.n_fft = win_length if n_fft is None else n_fft
290
+ self.hop_length = hop_length
291
+ self.win_length = win_length
292
+ self.sampling_rate = sampling_rate
293
+ self.n_mel_channels = n_mel_channels
294
+ self.clamp = clamp
295
+ self.is_half = is_half
296
+
297
+ def forward(self, audio, keyshift=0, speed=1, center=True):
298
+ factor = 2 ** (keyshift / 12)
299
+ n_fft_new = int(np.round(self.n_fft * factor))
300
+ win_length_new = int(np.round(self.win_length * factor))
301
+ hop_length_new = int(np.round(self.hop_length * speed))
302
+ keyshift_key = str(keyshift) + "_" + str(audio.device)
303
+ if keyshift_key not in self.hann_window:
304
+ self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
305
+ audio.device
306
+ )
307
+ fft = torch.stft(
308
+ audio,
309
+ n_fft=n_fft_new,
310
+ hop_length=hop_length_new,
311
+ win_length=win_length_new,
312
+ window=self.hann_window[keyshift_key],
313
+ center=center,
314
+ return_complex=True,
315
+ )
316
+ magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
317
+ if keyshift != 0:
318
+ size = self.n_fft // 2 + 1
319
+ resize = magnitude.size(1)
320
+ if resize < size:
321
+ magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
322
+ magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
323
+ mel_output = torch.matmul(self.mel_basis, magnitude)
324
+ if self.is_half == True:
325
+ mel_output = mel_output.half()
326
+ log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
327
+ return log_mel_spec
328
+
329
+
330
+ class RMVPE:
331
+ def __init__(self, model_path, is_half, device=None):
332
+ self.resample_kernel = {}
333
+ model = E2E(4, 1, (2, 2))
334
+ ckpt = torch.load(model_path, map_location="cpu")
335
+ model.load_state_dict(ckpt)
336
+ model.eval()
337
+ if is_half == True:
338
+ model = model.half()
339
+ self.model = model
340
+ self.resample_kernel = {}
341
+ self.is_half = is_half
342
+ if device is None:
343
+ device = "cuda" if torch.cuda.is_available() else "cpu"
344
+ self.device = device
345
+ self.mel_extractor = MelSpectrogram(
346
+ is_half, 128, 16000, 1024, 160, None, 30, 8000
347
+ ).to(device)
348
+ self.model = self.model.to(device)
349
+ cents_mapping = 20 * np.arange(360) + 1997.3794084376191
350
+ self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368
351
+
352
+ def mel2hidden(self, mel):
353
+ with torch.no_grad():
354
+ n_frames = mel.shape[-1]
355
+ mel = F.pad(
356
+ mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect"
357
+ )
358
+ hidden = self.model(mel)
359
+ return hidden[:, :n_frames]
360
+
361
+ def decode(self, hidden, thred=0.03):
362
+ cents_pred = self.to_local_average_cents(hidden, thred=thred)
363
+ f0 = 10 * (2 ** (cents_pred / 1200))
364
+ f0[f0 == 10] = 0
365
+ # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
366
+ return f0
367
+
368
+ def infer_from_audio(self, audio, thred=0.03):
369
+ audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
370
+ # torch.cuda.synchronize()
371
+ # t0=ttime()
372
+ mel = self.mel_extractor(audio, center=True)
373
+ # torch.cuda.synchronize()
374
+ # t1=ttime()
375
+ hidden = self.mel2hidden(mel)
376
+ # torch.cuda.synchronize()
377
+ # t2=ttime()
378
+ hidden = hidden.squeeze(0).cpu().numpy()
379
+ if self.is_half == True:
380
+ hidden = hidden.astype("float32")
381
+ f0 = self.decode(hidden, thred=thred)
382
+ # torch.cuda.synchronize()
383
+ # t3=ttime()
384
+ # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0))
385
+ return f0
386
+
387
+ def to_local_average_cents(self, salience, thred=0.05):
388
+ # t0 = ttime()
389
+ center = np.argmax(salience, axis=1)  # (n_frames,) index of the peak bin
390
+ salience = np.pad(salience, ((0, 0), (4, 4)))  # (n_frames, 368)
391
+ # t1 = ttime()
392
+ center += 4
393
+ todo_salience = []
394
+ todo_cents_mapping = []
395
+ starts = center - 4
396
+ ends = center + 5
397
+ for idx in range(salience.shape[0]):
398
+ todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
399
+ todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
400
+ # t2 = ttime()
401
+ todo_salience = np.array(todo_salience)  # (n_frames, 9)
402
+ todo_cents_mapping = np.array(todo_cents_mapping)  # (n_frames, 9)
403
+ product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
404
+ weight_sum = np.sum(todo_salience, 1)  # (n_frames,)
405
+ divided = product_sum / weight_sum  # (n_frames,) salience-weighted average in cents
406
+ # t3 = ttime()
407
+ maxx = np.max(salience, axis=1)  # (n_frames,)
408
+ divided[maxx <= thred] = 0
409
+ # t4 = ttime()
410
+ # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
411
+ return divided
412
+
413
+
414
+ # if __name__ == '__main__':
415
+ # audio, sampling_rate = sf.read("卢本伟语录~1.wav")
416
+ # if len(audio.shape) > 1:
417
+ # audio = librosa.to_mono(audio.transpose(1, 0))
418
+ # audio_bak = audio.copy()
419
+ # if sampling_rate != 16000:
420
+ # audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
421
+ # model_path = "/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/test-RMVPE/weights/rmvpe_llc_half.pt"
422
+ # thred = 0.03 # 0.01
423
+ # device = 'cuda' if torch.cuda.is_available() else 'cpu'
424
+ # rmvpe = RMVPE(model_path,is_half=False, device=device)
425
+ # t0=ttime()
426
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
427
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
428
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
429
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
430
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
431
+ # t1=ttime()
432
+ # print(f0.shape,t1-t0)
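
A minimal usage sketch for the RMVPE class above, mirroring the commented-out __main__ block (it assumes a local rmvpe.pt checkpoint, and "input.wav" is a placeholder file name):

    import librosa
    import torch
    from rmvpe import RMVPE

    audio, _ = librosa.load("input.wav", sr=16000, mono=True)  # 16 kHz mono input
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = RMVPE("rmvpe.pt", is_half=False, device=device)
    f0 = model.infer_from_audio(audio, thred=0.03)  # per-frame F0 in Hz; 0 marks unvoiced frames
    print(f0.shape)
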
BanG-Dream-MyGO/vc_infer_pipeline.py ADDED
@@ -0,0 +1,443 @@
1
+ import numpy as np, parselmouth, torch, sys, os
2
+ from time import time as ttime
3
+ import torch.nn.functional as F
4
+ import pyworld, traceback, faiss, librosa, torchcrepe
5
+ from scipy import signal
7
+ from functools import lru_cache
8
+
9
+ now_dir = os.getcwd()
10
+ sys.path.append(now_dir)
11
+
12
+ bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
13
+
14
+ input_audio_path2wav = {}
15
+
16
+
17
+ @lru_cache
18
+ def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
19
+ audio = input_audio_path2wav[input_audio_path]
20
+ f0, t = pyworld.harvest(
21
+ audio,
22
+ fs=fs,
23
+ f0_ceil=f0max,
24
+ f0_floor=f0min,
25
+ frame_period=frame_period,
26
+ )
27
+ f0 = pyworld.stonemask(audio, f0, t, fs)
28
+ return f0
29
+
30
+
31
+ def change_rms(data1, sr1, data2, sr2, rate): # data1: input audio, data2: output audio, rate: data2's blend weight
32
+ # print(data1.max(),data2.max())
33
+ rms1 = librosa.feature.rms(
34
+ y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
35
+ ) # one RMS point every half second
36
+ rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
37
+ rms1 = torch.from_numpy(rms1)
38
+ rms1 = F.interpolate(
39
+ rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
40
+ ).squeeze()
41
+ rms2 = torch.from_numpy(rms2)
42
+ rms2 = F.interpolate(
43
+ rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
44
+ ).squeeze()
45
+ rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
46
+ data2 *= (
47
+ torch.pow(rms1, torch.tensor(1 - rate))
48
+ * torch.pow(rms2, torch.tensor(rate - 1))
49
+ ).numpy()
50
+ return data2
51
+
52
+
53
+ class VC(object):
54
+ def __init__(self, tgt_sr, config):
55
+ self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
56
+ config.x_pad,
57
+ config.x_query,
58
+ config.x_center,
59
+ config.x_max,
60
+ config.is_half,
61
+ )
62
+ self.sr = 16000 # HuBERT input sample rate
63
+ self.window = 160 # samples per frame
64
+ self.t_pad = self.sr * self.x_pad # padding before/after each segment
65
+ self.t_pad_tgt = tgt_sr * self.x_pad
66
+ self.t_pad2 = self.t_pad * 2
67
+ self.t_query = self.sr * self.x_query # search window around each cut point
68
+ self.t_center = self.sr * self.x_center # spacing between candidate cut points
69
+ self.t_max = self.sr * self.x_max # duration threshold below which no cutting is needed
70
+ self.device = config.device
71
+
72
+ def get_f0(
73
+ self,
74
+ input_audio_path,
75
+ x,
76
+ p_len,
77
+ f0_up_key,
78
+ f0_method,
79
+ filter_radius,
80
+ inp_f0=None,
81
+ ):
82
+ global input_audio_path2wav
83
+ time_step = self.window / self.sr * 1000
84
+ f0_min = 50
85
+ f0_max = 1100
86
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
87
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
88
+ if f0_method == "pm":
89
+ f0 = (
90
+ parselmouth.Sound(x, self.sr)
91
+ .to_pitch_ac(
92
+ time_step=time_step / 1000,
93
+ voicing_threshold=0.6,
94
+ pitch_floor=f0_min,
95
+ pitch_ceiling=f0_max,
96
+ )
97
+ .selected_array["frequency"]
98
+ )
99
+ pad_size = (p_len - len(f0) + 1) // 2
100
+ if pad_size > 0 or p_len - len(f0) - pad_size > 0:
101
+ f0 = np.pad(
102
+ f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
103
+ )
104
+ elif f0_method == "harvest":
105
+ input_audio_path2wav[input_audio_path] = x.astype(np.double)
106
+ f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
107
+ if filter_radius > 2:
108
+ f0 = signal.medfilt(f0, 3)
109
+ elif f0_method == "crepe":
110
+ model = "full"
111
+ # Pick a batch size that doesn't cause memory errors on your gpu
112
+ batch_size = 512
113
+ # Compute pitch using first gpu
114
+ audio = torch.tensor(np.copy(x))[None].float()
115
+ f0, pd = torchcrepe.predict(
116
+ audio,
117
+ self.sr,
118
+ self.window,
119
+ f0_min,
120
+ f0_max,
121
+ model,
122
+ batch_size=batch_size,
123
+ device=self.device,
124
+ return_periodicity=True,
125
+ )
126
+ pd = torchcrepe.filter.median(pd, 3)
127
+ f0 = torchcrepe.filter.mean(f0, 3)
128
+ f0[pd < 0.1] = 0
129
+ f0 = f0[0].cpu().numpy()
130
+ elif f0_method == "rmvpe":
131
+ if not hasattr(self, "model_rmvpe"):
132
+ from rmvpe import RMVPE
133
+
134
+ print("loading rmvpe model")
135
+ self.model_rmvpe = RMVPE(
136
+ "rmvpe.pt", is_half=self.is_half, device=self.device
137
+ )
138
+ f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
139
+ f0 *= pow(2, f0_up_key / 12)
140
+ # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
141
+ tf0 = self.sr // self.window # f0 points per second
142
+ if inp_f0 is not None:
143
+ delta_t = np.round(
144
+ (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
145
+ ).astype("int16")
146
+ replace_f0 = np.interp(
147
+ list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
148
+ )
149
+ shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
150
+ f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
151
+ :shape
152
+ ]
153
+ # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
154
+ f0bak = f0.copy()
155
+ f0_mel = 1127 * np.log(1 + f0 / 700)
156
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
157
+ f0_mel_max - f0_mel_min
158
+ ) + 1
159
+ f0_mel[f0_mel <= 1] = 1
160
+ f0_mel[f0_mel > 255] = 255
161
+ f0_coarse = np.rint(f0_mel).astype(int)  # np.int was removed in NumPy >= 1.24
162
+ return f0_coarse, f0bak # 1-0
163
+
164
+ def vc(
165
+ self,
166
+ model,
167
+ net_g,
168
+ sid,
169
+ audio0,
170
+ pitch,
171
+ pitchf,
172
+ times,
173
+ index,
174
+ big_npy,
175
+ index_rate,
176
+ version,
177
+ protect,
178
+ ): # ,file_index,file_big_npy
179
+ feats = torch.from_numpy(audio0)
180
+ if self.is_half:
181
+ feats = feats.half()
182
+ else:
183
+ feats = feats.float()
184
+ if feats.dim() == 2: # double channels
185
+ feats = feats.mean(-1)
186
+ assert feats.dim() == 1, feats.dim()
187
+ feats = feats.view(1, -1)
188
+ padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
189
+
190
+ inputs = {
191
+ "source": feats.to(self.device),
192
+ "padding_mask": padding_mask,
193
+ "output_layer": 9 if version == "v1" else 12,
194
+ }
195
+ t0 = ttime()
196
+ with torch.no_grad():
197
+ logits = model.extract_features(**inputs)
198
+ feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
199
+ if protect < 0.5 and pitch is not None and pitchf is not None:
200
+ feats0 = feats.clone()
201
+ if (
202
+ index is not None
203
+ and big_npy is not None
204
+ and index_rate != 0
205
+ ):
206
+ npy = feats[0].cpu().numpy()
207
+ if self.is_half:
208
+ npy = npy.astype("float32")
209
+
210
+ # _, I = index.search(npy, 1)
211
+ # npy = big_npy[I.squeeze()]
212
+
213
+ score, ix = index.search(npy, k=8)
214
+ weight = np.square(1 / score)
215
+ weight /= weight.sum(axis=1, keepdims=True)
216
+ npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
217
+
218
+ if self.is_half:
219
+ npy = npy.astype("float16")
220
+ feats = (
221
+ torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
222
+ + (1 - index_rate) * feats
223
+ )
224
+
225
+ feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
226
+ if protect < 0.5 and pitch is not None and pitchf is not None:
227
+ feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
228
+ 0, 2, 1
229
+ )
230
+ t1 = ttime()
231
+ p_len = audio0.shape[0] // self.window
232
+ if feats.shape[1] < p_len:
233
+ p_len = feats.shape[1]
234
+ if pitch is not None and pitchf is not None:
235
+ pitch = pitch[:, :p_len]
236
+ pitchf = pitchf[:, :p_len]
237
+
238
+ if protect < 0.5 and pitch is not None and pitchf is not None:
239
+ pitchff = pitchf.clone()
240
+ pitchff[pitchf > 0] = 1
241
+ pitchff[pitchf < 1] = protect
242
+ pitchff = pitchff.unsqueeze(-1)
243
+ feats = feats * pitchff + feats0 * (1 - pitchff)
244
+ feats = feats.to(feats0.dtype)
245
+ p_len = torch.tensor([p_len], device=self.device).long()
246
+ with torch.no_grad():
247
+ if pitch is not None and pitchf is not None:
248
+ audio1 = (
249
+ (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
250
+ .data.cpu()
251
+ .float()
252
+ .numpy()
253
+ )
254
+ else:
255
+ audio1 = (
256
+ (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
257
+ )
258
+ del feats, p_len, padding_mask
259
+ if torch.cuda.is_available():
260
+ torch.cuda.empty_cache()
261
+ t2 = ttime()
262
+ times[0] += t1 - t0
263
+ times[2] += t2 - t1
264
+ return audio1
265
+
266
+ def pipeline(
267
+ self,
268
+ model,
269
+ net_g,
270
+ sid,
271
+ audio,
272
+ input_audio_path,
273
+ times,
274
+ f0_up_key,
275
+ f0_method,
276
+ file_index,
277
+ # file_big_npy,
278
+ index_rate,
279
+ if_f0,
280
+ filter_radius,
281
+ tgt_sr,
282
+ resample_sr,
283
+ rms_mix_rate,
284
+ version,
285
+ protect,
286
+ f0_file=None,
287
+ ):
288
+ if (
289
+ file_index != ""
290
+ # and file_big_npy != ""
291
+ # and os.path.exists(file_big_npy) == True
292
+ and os.path.exists(file_index)
293
+ and index_rate != 0
294
+ ):
295
+ try:
296
+ index = faiss.read_index(file_index)
297
+ # big_npy = np.load(file_big_npy)
298
+ big_npy = index.reconstruct_n(0, index.ntotal)
299
+ except Exception:
300
+ traceback.print_exc()
301
+ index = big_npy = None
302
+ else:
303
+ index = big_npy = None
304
+ audio = signal.filtfilt(bh, ah, audio)
305
+ audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
306
+ opt_ts = []
307
+ if audio_pad.shape[0] > self.t_max:
308
+ audio_sum = np.zeros_like(audio)
309
+ for i in range(self.window):
310
+ audio_sum += audio_pad[i : i - self.window]
311
+ for t in range(self.t_center, audio.shape[0], self.t_center):
312
+ opt_ts.append(
313
+ t
314
+ - self.t_query
315
+ + np.where(
316
+ np.abs(audio_sum[t - self.t_query : t + self.t_query])
317
+ == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
318
+ )[0][0]
319
+ )
320
+ s = 0
321
+ audio_opt = []
322
+ t = None
323
+ t1 = ttime()
324
+ audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
325
+ p_len = audio_pad.shape[0] // self.window
326
+ inp_f0 = None
327
+ if hasattr(f0_file, "name"):
328
+ try:
329
+ with open(f0_file.name, "r") as f:
330
+ lines = f.read().strip("\n").split("\n")
331
+ inp_f0 = []
332
+ for line in lines:
333
+ inp_f0.append([float(i) for i in line.split(",")])
334
+ inp_f0 = np.array(inp_f0, dtype="float32")
335
+ except Exception:
336
+ traceback.print_exc()
337
+ sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
338
+ pitch, pitchf = None, None
339
+ if if_f0 == 1:
340
+ pitch, pitchf = self.get_f0(
341
+ input_audio_path,
342
+ audio_pad,
343
+ p_len,
344
+ f0_up_key,
345
+ f0_method,
346
+ filter_radius,
347
+ inp_f0,
348
+ )
349
+ pitch = pitch[:p_len]
350
+ pitchf = pitchf[:p_len]
351
+ if self.device == "mps":
352
+ pitchf = pitchf.astype(np.float32)
353
+ pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
354
+ pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
355
+ t2 = ttime()
356
+ times[1] += t2 - t1
357
+ for t in opt_ts:
358
+ t = t // self.window * self.window
359
+ if if_f0 == 1:
360
+ audio_opt.append(
361
+ self.vc(
362
+ model,
363
+ net_g,
364
+ sid,
365
+ audio_pad[s : t + self.t_pad2 + self.window],
366
+ pitch[:, s // self.window : (t + self.t_pad2) // self.window],
367
+ pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
368
+ times,
369
+ index,
370
+ big_npy,
371
+ index_rate,
372
+ version,
373
+ protect,
374
+ )[self.t_pad_tgt : -self.t_pad_tgt]
375
+ )
376
+ else:
377
+ audio_opt.append(
378
+ self.vc(
379
+ model,
380
+ net_g,
381
+ sid,
382
+ audio_pad[s : t + self.t_pad2 + self.window],
383
+ None,
384
+ None,
385
+ times,
386
+ index,
387
+ big_npy,
388
+ index_rate,
389
+ version,
390
+ protect,
391
+ )[self.t_pad_tgt : -self.t_pad_tgt]
392
+ )
393
+ s = t
394
+ if if_f0 == 1:
395
+ audio_opt.append(
396
+ self.vc(
397
+ model,
398
+ net_g,
399
+ sid,
400
+ audio_pad[t:],
401
+ pitch[:, t // self.window :] if t is not None else pitch,
402
+ pitchf[:, t // self.window :] if t is not None else pitchf,
403
+ times,
404
+ index,
405
+ big_npy,
406
+ index_rate,
407
+ version,
408
+ protect,
409
+ )[self.t_pad_tgt : -self.t_pad_tgt]
410
+ )
411
+ else:
412
+ audio_opt.append(
413
+ self.vc(
414
+ model,
415
+ net_g,
416
+ sid,
417
+ audio_pad[t:],
418
+ None,
419
+ None,
420
+ times,
421
+ index,
422
+ big_npy,
423
+ index_rate,
424
+ version,
425
+ protect,
426
+ )[self.t_pad_tgt : -self.t_pad_tgt]
427
+ )
428
+ audio_opt = np.concatenate(audio_opt)
429
+ if rms_mix_rate != 1:
430
+ audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
431
+ if resample_sr >= 16000 and tgt_sr != resample_sr:
432
+ audio_opt = librosa.resample(
433
+ audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
434
+ )
435
+ audio_max = np.abs(audio_opt).max() / 0.99
436
+ max_int16 = 32768
437
+ if audio_max > 1:
438
+ max_int16 /= audio_max
439
+ audio_opt = (audio_opt * max_int16).astype(np.int16)
440
+ del pitch, pitchf, sid
441
+ if torch.cuda.is_available():
442
+ torch.cuda.empty_cache()
443
+ return audio_opt
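
A quick numeric check of the envelope blend in change_rms above: the output is scaled by rms1**(1 - rate) * rms2**(rate - 1), so rate=0 copies the input's loudness, rate=1 keeps the converted audio's own loudness, and values in between take a geometric mean. The RMS values below are hypothetical:

    rms1, rms2 = 0.2, 0.05  # example input/output RMS values (assumed)
    for rate in (0.0, 0.5, 1.0):
        gain = rms1 ** (1 - rate) * rms2 ** (rate - 1)
        print(rate, gain * rms2)  # effective RMS of the scaled output: 0.2, 0.1, 0.05
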
Bocchi-the-Rock/Bocchi Chan.gif ADDED

Git LFS Details

  • SHA256: 381cd3154eb2e1eb9febcdbcdcf1183c00249dc5c74dd86ebe41da965dd8bc93
  • Pointer size: 132 Bytes
  • Size of remote file: 3.35 MB
Bocchi-the-Rock/Bocchi-the-Rock.PNG ADDED

Git LFS Details

  • SHA256: b190b4910a61e28f577bb4a7f0df250299c603363f022065d70f949dace22685
  • Pointer size: 131 Bytes
  • Size of remote file: 121 kB
Bocchi-the-Rock/Dockerfile ADDED
@@ -0,0 +1,54 @@
1
+ # Use a hash-pinned base image to match the Hugging Face cache
2
+ FROM python:3.10@sha256:875c3591e586f66aa65621926230925144920c951902a6c2eef005d9783a7ca7
3
+
4
+ # Start as root for the initial setup
5
+ USER root
6
+
7
+ # Install fakeroot + wrap apt-get, then create a user with UID 1000
8
+ RUN apt-get update && apt-get install -y fakeroot && \
9
+ mv /usr/bin/apt-get /usr/bin/.apt-get && \
10
+ echo '#!/usr/bin/env sh\nfakeroot /usr/bin/.apt-get "$@"' > /usr/bin/apt-get && \
11
+ chmod +x /usr/bin/apt-get && \
12
+ rm -rf /var/lib/apt/lists/* && \
13
+ useradd -m -u 1000 user
14
+
15
+ # Install common dependencies for ML / Gradio / media processing
16
+ RUN apt-get update && apt-get install -y \
17
+ git \
18
+ git-lfs \
19
+ ffmpeg \
20
+ libsm6 \
21
+ libxext6 \
22
+ libgl1-mesa-glx \
23
+ cmake \
24
+ rsync \
25
+ && rm -rf /var/lib/apt/lists/* && \
26
+ git lfs install
27
+
28
+ # Switch to the standard Hugging Face user (UID 1000)
29
+ USER user
30
+ ENV HOME=/home/user \
31
+ PATH=$HOME/.local/bin:$PATH
32
+
33
+ WORKDIR $HOME/app
34
+
35
+ # Pin pip to version 24.0 explicitly
36
+ RUN pip install --no-cache-dir pip==24.0
37
+
38
+ # Copy requirements.txt to a temporary location
39
+ COPY --chown=1000:1000 requirements.txt /tmp/pre-requirements.txt
40
+
41
+ # Install the project's Python dependencies
42
+ RUN pip install --no-cache-dir -r /tmp/pre-requirements.txt
43
+
44
+ # Copy the entire project source
45
+ COPY --link --chown=1000:1000 . .
46
+
47
+ # Write all dependencies to a freeze file (for the HF cache)
48
+ RUN pip freeze > /tmp/freeze.txt
49
+
50
+ # Expose the default Gradio / FastAPI port
51
+ EXPOSE 7860
52
+
53
+ # Run the Python app
54
+ CMD ["python3", "app.py", "--api"]
Bocchi-the-Rock/Dockerfile.txt ADDED
@@ -0,0 +1,54 @@
1
+ # Use a hash-pinned base image to match the Hugging Face cache
2
+ FROM python:3.10@sha256:875c3591e586f66aa65621926230925144920c951902a6c2eef005d9783a7ca7
3
+
4
+ # Start as root for the initial setup
5
+ USER root
6
+
7
+ # Install fakeroot + wrap apt-get, then create a user with UID 1000
8
+ RUN apt-get update && apt-get install -y fakeroot && \
9
+ mv /usr/bin/apt-get /usr/bin/.apt-get && \
10
+ echo '#!/usr/bin/env sh\nfakeroot /usr/bin/.apt-get "$@"' > /usr/bin/apt-get && \
11
+ chmod +x /usr/bin/apt-get && \
12
+ rm -rf /var/lib/apt/lists/* && \
13
+ useradd -m -u 1000 user
14
+
15
+ # Install common dependencies for ML / Gradio / media processing
16
+ RUN apt-get update && apt-get install -y \
17
+ git \
18
+ git-lfs \
19
+ ffmpeg \
20
+ libsm6 \
21
+ libxext6 \
22
+ libgl1-mesa-glx \
23
+ cmake \
24
+ rsync \
25
+ && rm -rf /var/lib/apt/lists/* && \
26
+ git lfs install
27
+
28
+ # Switch to the standard Hugging Face user (UID 1000)
29
+ USER user
30
+ ENV HOME=/home/user \
31
+ PATH=$HOME/.local/bin:$PATH
32
+
33
+ WORKDIR $HOME/app
34
+
35
+ # Pin pip to version 24.0 explicitly
36
+ RUN pip install --no-cache-dir pip==24.0
37
+
38
+ # Copy requirements.txt to a temporary location
39
+ COPY --chown=1000:1000 requirements.txt /tmp/pre-requirements.txt
40
+
41
+ # Install the project's Python dependencies
42
+ RUN pip install --no-cache-dir -r /tmp/pre-requirements.txt
43
+
44
+ # Copy the entire project source
45
+ COPY --link --chown=1000:1000 . .
46
+
47
+ # Write all dependencies to a freeze file (for the HF cache)
48
+ RUN pip freeze > /tmp/freeze.txt
49
+
50
+ # Expose the default Gradio / FastAPI port
51
+ EXPOSE 7860
52
+
53
+ # Run the Python app
54
+ CMD ["python3", "app.py", "--api"]
Bocchi-the-Rock/app.py ADDED
@@ -0,0 +1,922 @@
1
+ import os
2
+ import json
3
+ import traceback
4
+ import logging
5
+ import gradio as gr
6
+ import numpy as np
7
+ import librosa
8
+ import torch
9
+ import asyncio
10
+ import edge_tts
11
+ import re
12
+ import shutil
13
+ import time
14
+ from datetime import datetime
15
+ from fairseq import checkpoint_utils
16
+ from fairseq.data.dictionary import Dictionary
17
+ from lib.infer_pack.models import (
18
+ SynthesizerTrnMs256NSFsid,
19
+ SynthesizerTrnMs256NSFsid_nono,
20
+ SynthesizerTrnMs768NSFsid,
21
+ SynthesizerTrnMs768NSFsid_nono,
22
+ )
23
+ from vc_infer_pipeline import VC
24
+ from config import Config
25
+
26
+ # =============================
27
+ # LOAD ENVIRONMENT VARIABLES
28
+ # =============================
29
+ from dotenv import load_dotenv
30
+ load_dotenv()
31
+
32
+ HF_TOKEN = os.getenv("HF_TOKEN")
33
+ if HF_TOKEN:
34
+ print("🔑 Hugging Face token detected")
35
+ os.environ["HUGGINGFACE_TOKEN"] = HF_TOKEN
36
+ else:
37
+ print("⚠️ No HF_TOKEN found")
38
+
39
+ # =============================
40
+ # AUTO-DOWNLOAD FROM HUGGING FACE (FIXED)
41
+ # =============================
42
+ def download_required_weights():
43
+ """Download the models from Hugging Face"""
44
+ print("=" * 50)
45
+ print("🚀 BOCCHI THE ROCK VOICE CONVERSION v2.0")
46
+ print("=" * 50)
47
+
48
+ target_dir = "weights/Bocchi-the-Rock"
49
+
50
+ # Check if the models already exist
51
+ if os.path.exists(target_dir):
52
+ print(f"📁 Checking existing models in: {target_dir}")
53
+ model_files = []
54
+ for root, dirs, files in os.walk(target_dir):
55
+ for file in files:
56
+ if file.endswith(".pth"):
57
+ model_files.append(os.path.join(root, file))
58
+
59
+ if len(model_files) >= 8:
60
+ print(f"✅ Models already exist: {len(model_files)} .pth files found")
61
+ print("📊 Listing available models:")
62
+ for m in model_files:
63
+ print(f" - {os.path.basename(m)}")
64
+ return True
65
+ else:
66
+ print(f"⚠️ Incomplete models: {len(model_files)}/8 .pth files found")
67
+
68
+ try:
69
+ from huggingface_hub import snapshot_download
70
+
71
+ repo_id = "Plana-Archive/Premium-Model"
72
+ print(f"📥 Downloading from: {repo_id}")
73
+ print("📁 Looking for: Bocchi the Rock! - RCV/weights/Bocchi-the-Rock")
74
+
75
+ # Download with a more specific pattern
76
+ downloaded_path = snapshot_download(
77
+ repo_id=repo_id,
78
+ allow_patterns=[
79
+ "Bocchi the Rock! - RCV/weights/Bocchi-the-Rock/**",
80
+ "**/folder_info.json",
81
+ "**/model_info.json"
82
+ ],
83
+ local_dir=".",
84
+ local_dir_use_symlinks=False,
85
+ token=HF_TOKEN,
86
+ max_workers=2
87
+ )
88
+
89
+ print("✅ Download completed")
90
+
91
+ # Move the files
92
+ source_dir = "Bocchi the Rock! - RCV/weights/Bocchi-the-Rock"
93
+
94
+ if os.path.exists(source_dir):
95
+ os.makedirs("weights", exist_ok=True)
96
+
97
+ if os.path.exists(target_dir):
98
+ print("📦 Removing old weights folder...")
99
+ shutil.rmtree(target_dir)
100
+
101
+ print(f"📂 Moving models to: {target_dir}")
102
+ shutil.move(source_dir, target_dir)
103
+
104
+ # Verify the folder contents after moving
105
+ print("\n📊 Verifying downloaded models:")
106
+ for root, dirs, files in os.walk(target_dir):
107
+ for dir_name in dirs:
108
+ dir_path = os.path.join(root, dir_name)
109
+ pth_files = [f for f in os.listdir(dir_path) if f.endswith('.pth')]
110
+ index_files = [f for f in os.listdir(dir_path) if f.endswith('.index')]
111
+ image_files = [f for f in os.listdir(dir_path) if f.endswith(('.png', '.jpg', '.jpeg'))]
112
+
113
+ if pth_files:
114
+ print(f" 📁 {dir_name}:")
115
+ print(f" Model: {pth_files[0] if pth_files else 'NOT FOUND'}")
116
+ print(f" Index: {index_files[0] if index_files else 'NOT FOUND'}")
117
+ print(f" Cover: {image_files[0] if image_files else 'NOT FOUND'}")
118
+
119
+ # Remove the source folder
120
+ try:
121
+ if os.path.exists("Bocchi the Rock! - RCV"):
122
+ shutil.rmtree("Bocchi the Rock! - RCV")
123
+ except Exception:
124
+ pass
125
+
126
+ # Update model_info.json with the actual file names
127
+ update_model_info_with_actual_files(target_dir)
128
+
129
+ return True
130
+ else:
131
+ print("❌ Source directory not found after download!")
132
+ return False
133
+
134
+ except Exception as e:
135
+ print(f"⚠️ Download failed: {str(e)}")
136
+ print("\n📝 Manual setup:")
137
+ print("1. Create folder: weights/Bocchi-the-Rock/")
138
+ print("2. Download from: https://huggingface.co/Plana-Archive/Anime-RCV")
139
+ print("3. Look for: Bocchi the Rock! - RCV/weights/Bocchi-the-Rock")
140
+ print("4. Put each character in their own folder")
141
+
142
+ return False
143
+
144
+ def update_model_info_with_actual_files(target_dir):
145
+ """Update model_info.json with the file names actually present"""
146
+ model_info_path = os.path.join(target_dir, "model_info.json")
147
+
148
+ if not os.path.exists(model_info_path):
149
+ print("⚠️ model_info.json not found, creating default...")
150
+ # Build model_info.json from the files on disk
151
+ model_info = {}
152
+ for char_dir in os.listdir(target_dir):
153
+ char_path = os.path.join(target_dir, char_dir)
154
+ if os.path.isdir(char_path):
155
+ pth_files = [f for f in os.listdir(char_path) if f.endswith('.pth')]
156
+ index_files = [f for f in os.listdir(char_path) if f.endswith('.index')]
157
+ image_files = [f for f in os.listdir(char_path) if f.endswith(('.png', '.jpg', '.jpeg'))]
158
+
159
+ if pth_files:
160
+ model_info[char_dir] = {
161
+ "enable": True,
162
+ "model_path": pth_files[0],
163
+ "title": f"Bocchi the Rock! - {char_dir.replace('-', ' ')}",
164
+ "cover": image_files[0] if image_files else "cover.png",
165
+ "feature_retrieval_library": index_files[0] if index_files else f"{char_dir}.index",
166
+ "author": "Plana-Archive"
167
+ }
168
+
169
+ with open(model_info_path, "w", encoding="utf-8") as f:
170
+ json.dump(model_info, f, indent=2, ensure_ascii=False)
171
+ print(f"✅ Created model_info.json with {len(model_info)} characters")
172
+ else:
173
+ print("📄 Found model_info.json, checking for file mismatches...")
174
+ try:
175
+ with open(model_info_path, "r", encoding="utf-8") as f:
176
+ model_info = json.load(f)
177
+
178
+ updated = False
179
+ for char_name, info in model_info.items():
180
+ if not info.get('enable', True):
181
+ continue
182
+
183
+ char_path = os.path.join(target_dir, char_name)
184
+ if os.path.exists(char_path):
185
+ # Check whether the model file exists
186
+ expected_model = info.get('model_path')
187
+ actual_models = [f for f in os.listdir(char_path) if f.endswith('.pth')]
188
+
189
+ if expected_model not in actual_models and actual_models:
190
+ print(f" 🔄 Updating {char_name}: {expected_model} → {actual_models[0]}")
191
+ info['model_path'] = actual_models[0]
192
+ updated = True
193
+
194
+ # Check the index file
195
+ expected_index = info.get('feature_retrieval_library')
196
+ actual_indices = [f for f in os.listdir(char_path) if f.endswith('.index')]
197
+
198
+ if expected_index not in actual_indices and actual_indices:
199
+ print(f" 🔄 Updating {char_name} index: {expected_index} → {actual_indices[0]}")
200
+ info['feature_retrieval_library'] = actual_indices[0]
201
+ updated = True
202
+
203
+ if updated:
204
+ with open(model_info_path, "w", encoding="utf-8") as f:
205
+ json.dump(model_info, f, indent=2, ensure_ascii=False)
206
+ print("✅ Updated model_info.json with actual file names")
207
+ except Exception as e:
208
+ print(f"⚠️ Error updating model_info.json: {str(e)}")
209
+
210
+ # Run the download
211
+ download_required_weights()
212
+
213
+ # Initialize configuration
214
+ config = Config()
215
+ logging.getLogger("numba").setLevel(logging.WARNING)
216
+ logging.getLogger("fairseq").setLevel(logging.WARNING)
217
+
218
+ # Model cache
219
+ model_cache = {}
220
+ hubert_loaded = False
221
+ hubert_model = None
222
+
223
+ # Audio input modes
224
+ spaces = True
225
+ if spaces:
226
+ audio_mode = ["Upload audio", "TTS Audio"]
227
+ else:
228
+ audio_mode = ["Input path", "Upload audio", "TTS Audio"]
229
+
230
+ # F0 extraction methods
231
+ f0method_mode = ["pm", "harvest"]
232
+ if os.path.isfile("rmvpe.pt"):
233
+ f0method_mode.insert(2, "rmvpe")
234
+
235
+ def clean_title(title):
236
+ """Strip series prefixes and epoch counts from a model title"""
237
+ title = re.sub(r'^Blue Archive\s*-\s*', '', title, flags=re.IGNORECASE)
238
+ title = re.sub(r'^Bocchi the Rock!\s*-\s*', '', title, flags=re.IGNORECASE)
239
+ return re.sub(r'\s*-\s*\d+\s*epochs', '', title, flags=re.IGNORECASE)
240
+
241
+ def _load_audio_input(vc_audio_mode, vc_input, vc_upload, tts_text, spaces_limit=20):
242
+ """Load audio from one of several sources"""
243
+ temp_file = None
244
+ try:
245
+ if vc_audio_mode == "Input path" and vc_input:
246
+ audio, sr = librosa.load(vc_input, sr=16000, mono=True)
247
+ return audio.astype(np.float32), 16000, None
248
+
249
+ elif vc_audio_mode == "Upload audio":
250
+ if vc_upload is None:
251
+ raise ValueError("Please upload an audio file!")
252
+ sampling_rate, audio = vc_upload
253
+
254
+ if audio.dtype != np.float32:
255
+ audio = audio.astype(np.float32) / np.iinfo(audio.dtype).max
256
+
257
+ if len(audio.shape) > 1:
258
+ audio = np.mean(audio, axis=0)
259
+
260
+ if sampling_rate != 16000:
261
+ audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000, res_type='kaiser_fast')
262
+
263
+ return audio.astype(np.float32), 16000, None
264
+
265
+ elif vc_audio_mode == "TTS Audio":
266
+ if not tts_text or tts_text.strip() == "":
267
+ raise ValueError("Please enter text for TTS!")
268
+
269
+ temp_file = f"tts_temp_{int(time.time())}.wav"
270
+
271
+ async def tts_task():
272
+ return await edge_tts.Communicate(tts_text, "ja-JP-NanamiNeural").save(temp_file)
273
+
274
+ try:
275
+ asyncio.run(asyncio.wait_for(tts_task(), timeout=15))
276
+ except asyncio.TimeoutError:
277
+ raise ValueError("TTS timeout!")
278
+
279
+ audio, sr = librosa.load(temp_file, sr=16000, mono=True)
280
+ return audio.astype(np.float32), 16000, temp_file
281
+
282
+ except Exception as e:
283
+ if temp_file and os.path.exists(temp_file):
284
+ os.remove(temp_file)
285
+ raise e
286
+
287
+ raise ValueError("Invalid audio mode")
288
+
289
+ def adjust_audio_speed(audio, speed):
290
+ """Adjust audio playback speed"""
291
+ if speed == 1.0:
292
+ return audio
293
+ return librosa.effects.time_stretch(audio.astype(np.float32), rate=speed)
294
+
295
+ def preprocess_audio(audio):
296
+ """Peak-normalize audio before conversion"""
297
+ if np.max(np.abs(audio)) > 1.0:
298
+ audio = audio / np.max(np.abs(audio)) * 0.9
299
+ return audio.astype(np.float32)
300
+
301
+ def create_vc_fn(model_key, tgt_sr, net_g, vc, if_f0, version, file_index):
302
+ """Build the voice-conversion callback for one model"""
303
+ def vc_fn(
304
+ vc_audio_mode, vc_input, vc_upload, tts_text,
305
+ f0_up_key, f0_method, index_rate, filter_radius,
306
+ resample_sr, rms_mix_rate, protect, speed,
307
+ ):
308
+ temp_audio_file = None
309
+ try:
310
+ if torch.cuda.is_available():
311
+ torch.cuda.empty_cache()
312
+
313
+ net_g.to(config.device)
314
+
315
+ yield "Status: 🚀 Processing audio...", None
316
+
317
+ audio, sr, temp_audio_file = _load_audio_input(vc_audio_mode, vc_input, vc_upload, tts_text)
318
+ audio = preprocess_audio(audio)
319
+ audio_tensor = torch.FloatTensor(audio).to(config.device)
320
+
321
+ times = [0, 0, 0]
322
+ max_chunk_size = 16000 * 30
323
+
324
+ if len(audio) > max_chunk_size:
325
+ chunks = []
326
+ for i in range(0, len(audio), max_chunk_size):
327
+ chunk = audio[i:i + max_chunk_size]
328
+ chunk_tensor = torch.FloatTensor(chunk).to(config.device)
329
+
330
+ chunk_opt = vc.pipeline(
331
+ hubert_model, net_g, 0, chunk_tensor,
332
+ "chunk" if vc_input else "temp", times,
333
+ int(f0_up_key), f0_method, file_index, index_rate,
334
+ if_f0, filter_radius, tgt_sr, resample_sr,
335
+ rms_mix_rate, version, protect, f0_file=None,
336
+ )
337
+ chunks.append(chunk_opt)
338
+
339
+ audio_opt = np.concatenate(chunks)
340
+ else:
341
+ audio_opt = vc.pipeline(
342
+ hubert_model, net_g, 0, audio_tensor,
343
+ vc_input if vc_input else "temp", times,
344
+ int(f0_up_key), f0_method, file_index, index_rate,
345
+ if_f0, filter_radius, tgt_sr, resample_sr,
346
+ rms_mix_rate, version, protect, f0_file=None,
347
+ )
348
+
349
+ audio_opt = audio_opt.astype(np.float32)
350
+
351
+ if speed != 1.0:
352
+ audio_opt = adjust_audio_speed(audio_opt, speed)
353
+
354
+ if np.max(np.abs(audio_opt)) > 0:
355
+ audio_opt = (audio_opt / np.max(np.abs(audio_opt)) * 0.9).astype(np.float32)
356
+
357
+ yield "Status: ✅ Conversion completed!", (tgt_sr, audio_opt)
358
+
359
+ except Exception as e:
360
+ yield f"❌ Error: {str(e)}", None
361
+ finally:
362
+ if temp_audio_file and os.path.exists(temp_audio_file):
363
+ os.remove(temp_audio_file)
364
+
365
+ if torch.cuda.is_available():
366
+ torch.cuda.empty_cache()
367
+
368
+ if model_key not in model_cache:
369
+ net_g.to('cpu')
370
+
371
+ return vc_fn
372
+
373
+ def load_model():
374
+ """Load all voice models"""
375
+ print("\n" + "=" * 50)
376
+ print("🎵 LOADING VOICE MODELS")
377
+ print("=" * 50)
378
+
379
+ categories = []
380
+ base_path = "weights"
381
+
382
+ if not os.path.exists(base_path):
383
+ print(f"❌ Folder '{base_path}' not found!")
384
+ return categories
385
+
386
+ # Create folder_info.json if it does not exist
387
+ folder_info_path = f"{base_path}/folder_info.json"
388
+ if not os.path.isfile(folder_info_path):
389
+ print(f"📄 Creating {folder_info_path}...")
390
+ default_folder_info = {
391
+ "BocchiTheRock": {
392
+ "title": "Bocchi the Rock! - RCV Collection",
393
+ "folder_path": "Bocchi-the-Rock",
394
+ "description": "Official RVC Weights for Bocchi the Rock! characters",
395
+ "enable": True
396
+ }
397
+ }
398
+
399
+ with open(folder_info_path, "w", encoding="utf-8") as f:
400
+ json.dump(default_folder_info, f, indent=2, ensure_ascii=False)
401
+ print(f"✅ Created {folder_info_path}")
402
+
403
+ with open(folder_info_path, "r", encoding="utf-8") as f:
404
+ folder_info = json.load(f)
405
+
406
+ print(f"📁 Found {len(folder_info)} category(ies) in folder_info.json")
407
+
408
+ for category_name, category_info in folder_info.items():
409
+ if not category_info.get('enable', True):
410
+ continue
411
+
412
+ category_title = category_info['title']
413
+ category_folder = category_info['folder_path']
414
+ description = category_info['description']
415
+
416
+ category_folder = os.path.basename(category_folder)
417
+
418
+ models = []
419
+ model_info_path = f"{base_path}/{category_folder}/model_info.json"
420
+
421
+ print(f"\n📂 Loading category: {category_title}")
422
+ print(f" Path: {model_info_path}")
423
+
424
+ if os.path.exists(model_info_path):
425
+ with open(model_info_path, "r", encoding="utf-8") as f:
426
+ models_info = json.load(f)
427
+
428
+ print(f" Found {len(models_info)} character(s) in model_info.json")
429
+
430
+ for character_name, info in models_info.items():
431
+ if not info.get('enable', True):
432
+ continue
433
+
434
+ model_title = info['title']
435
+ model_name = info['model_path']
436
+ model_author = info.get("author", "Plana-Archive")
437
+
438
+ cache_key = f"{category_folder}_{character_name}"
439
+
440
+ char_dir = f"{base_path}/{category_folder}/{character_name}"
441
+ model_path = f"{char_dir}/{model_name}"
442
+ cover_path = f"{char_dir}/{info['cover']}"
443
+ index_path = f"{char_dir}/{info['feature_retrieval_library']}"
444
+
445
+ print(f"\n 👤 Character: {character_name}")
446
+ print(f" Expected model: {model_name}")
447
+ print(f" Expected cover: {info['cover']}")
448
+ print(f" Expected index: {info['feature_retrieval_library']}")
449
+
450
+ # Check the required files
451
+ required_files = [model_path, cover_path, index_path]
452
+ missing_files = [f for f in required_files if not os.path.exists(f)]
453
+
454
+ if missing_files:
455
+ print(f" ⚠️ Missing files:")
456
+ for f in missing_files:
457
+ print(f" - {os.path.basename(f)}")
458
+
459
+ # Try to find alternative files
460
+ if os.path.exists(char_dir):
461
+ actual_files = os.listdir(char_dir)
462
+ print(f" 📁 Actual files in directory:")
463
+ for f in actual_files:
464
+ print(f" - {f}")
465
+
466
+ # Look for a .pth file
467
+ pth_files = [f for f in actual_files if f.endswith('.pth')]
468
+ if pth_files and not os.path.exists(model_path):
469
+ print(f" 🔄 Found alternative model: {pth_files[0]}")
470
+ model_name = pth_files[0]
471
+ model_path = f"{char_dir}/{model_name}"
472
+
473
+ # Look for an index file
474
+ index_files = [f for f in actual_files if f.endswith('.index')]
475
+ if index_files and not os.path.exists(index_path):
476
+ print(f" 🔄 Found alternative index: {index_files[0]}")
477
+ index_path = f"{char_dir}/{index_files[0]}"
478
+
479
+ # Look for a cover image
480
+ image_files = [f for f in actual_files if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
481
+ if image_files and not os.path.exists(cover_path):
482
+ print(f" 🔄 Found alternative cover: {image_files[0]}")
483
+ cover_path = f"{char_dir}/{image_files[0]}"
484
+
485
+ # Re-check after searching for alternatives
486
+ required_files = [model_path, cover_path, index_path]
487
+ missing_files = [f for f in required_files if not os.path.exists(f)]
488
+
489
+ if missing_files:
490
+ print(f" ❌ Skipping {character_name} - still missing files")
491
+ continue
492
+
493
+ # Use the cache when available
494
+ if cache_key in model_cache:
495
+ tgt_sr, net_g, vc, if_f0, version, model_index = model_cache[cache_key]
496
+ print(f" ✅ Loaded from cache")
497
+ else:
498
+ try:
499
+ print(f" ⏳ Loading model weights...")
500
+
501
+ cpt = torch.load(model_path, map_location="cpu")
502
+ tgt_sr = cpt["config"][-1]
503
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
504
+ if_f0 = cpt.get("f0", 1)
505
+ version = cpt.get("version", "v1")
506
+
507
+ if version == "v1":
508
+ if if_f0 == 1:
509
+ net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
510
+ else:
511
+ net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
512
+ else:
513
+ if if_f0 == 1:
514
+ net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
515
+ else:
516
+ net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
517
+
518
+ if hasattr(net_g, "enc_q"):
519
+ del net_g.enc_q
520
+ net_g.load_state_dict(cpt["weight"], strict=False)
521
+ net_g.eval().to('cpu')
522
+
523
+ vc = VC(tgt_sr, config)
524
+ model_cache[cache_key] = (tgt_sr, net_g, vc, if_f0, version, index_path)
525
+
526
+ print(f" ✅ Model loaded successfully (v{version}, SR: {tgt_sr})")
527
+
528
+ except Exception as e:
529
+ print(f" ❌ Error loading model: {str(e)}")
530
+ continue
531
+
532
+ models.append((
533
+ character_name,
534
+ model_title,
535
+ model_author,
536
+ cover_path,
537
+ version,
538
+ create_vc_fn(cache_key, tgt_sr, net_g, vc, if_f0, version, index_path)
539
+ ))
540
+ else:
541
+ print(f" ⚠️ model_info.json not found at: {model_info_path}")
542
+
543
+ if models:
544
+ categories.append([category_title, category_folder, description, models])
545
+ print(f"\n 📊 Category '{category_title}' loaded with {len(models)} model(s)")
546
+ else:
547
+ print(f"\n ⚠️ No models loaded for category '{category_title}'")
548
+
549
+ total_models = sum(len(models) for _, _, _, models in categories)
550
+ print(f"\n🎯 Total categories loaded: {len(categories)}")
551
+ print(f"👥 Total models loaded: {total_models}")
552
+ print("=" * 50)
553
+
554
+ return categories
555
+
556
+ def load_hubert():
557
+ """Load the HuBERT feature extractor"""
558
+ global hubert_model, hubert_loaded
559
+ if hubert_loaded:
560
+ return
561
+
562
+ print("🔧 Loading HuBERT model...")
563
+ torch.serialization.add_safe_globals([Dictionary])
564
+ models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
565
+ ["hubert_base.pt"],
566
+ suffix="",
567
+ )
568
+ hubert_model = models[0].to(config.device)
569
+ hubert_model = hubert_model.half() if config.is_half else hubert_model.float()
570
+ hubert_model.eval()
571
+ hubert_loaded = True
572
+ print("✅ HuBERT model loaded successfully")
573
+
574
+ def change_audio_mode(vc_audio_mode):
575
+ """Toggle visibility of the audio input widgets"""
576
+ is_input_path = vc_audio_mode == "Input path"
577
+ is_upload = vc_audio_mode == "Upload audio"
578
+ is_tts = vc_audio_mode == "TTS Audio"
579
+
580
+ return (
581
+ gr.Textbox.update(visible=is_input_path),
582
+ gr.Checkbox.update(visible=is_upload),
583
+ gr.Audio.update(visible=is_upload),
584
+ gr.Textbox.update(visible=is_tts, lines=4 if is_tts else 2)
585
+ )
586
+
587
+ def use_microphone(microphone):
588
+ """Toggle microphone/upload source"""
589
+ return gr.Audio.update(source="microphone" if microphone else "upload")
590
+
591
+ # CSS with a pink theme
592
+ css = """
593
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=Quicksand:wght@400;600;700&display=swap');
594
+ body, .gradio-container { background-color: #ffffff !important; font-family: 'Inter', sans-serif !important; }
595
+ footer { display: none !important; }
596
+ .arona-loading-container { display: flex; align-items: center; justify-content: center; gap: 15px; margin-top: 15px; padding: 10px; }
597
+ .loading-text-pink { font-family: 'Quicksand', sans-serif; font-size: 20px; font-weight: 700; color: #ff69b4; letter-spacing: 1px; }
598
+ .loading-gif-small { width: 100px; height: auto; border-radius: 8px; }
599
+ .header-img-container { text-align: center; padding: 10px 0; background: #ffffff !important; }
600
+ .header-img { width: 100%; max-width: 500px; border-radius: 15px; margin: 0 auto; display: block; }
601
+ .status-card { background: #ffffff; border: 1px solid #ffe4ec; border-radius: 14px; padding: 15px 10px; margin: 0 auto 15px auto; max-width: 400px; display: flex; flex-direction: column; align-items: center; }
602
+ .status-online-box { display: flex; align-items: center; gap: 8px; margin-bottom: 12px; }
603
+ .status-details-container { display: flex; width: 100%; justify-content: center; align-items: center; border-top: 1px solid #fff0f7; padding-top: 10px; }
604
+ .status-detail-item { flex: 1; display: flex; flex-direction: column; align-items: center; text-align: center; }
605
+ .status-detail-item:first-child { border-right: 1px solid #ffe4ec; }
606
+ .status-text-main { font-size: 13px !important; font-weight: 600; color: #7b4d5a; }
607
+ .status-text-sub { font-size: 11px !important; color: #b07d8b; }
608
+ .dot-online { height: 8px; width: 8px; background-color: #ff69b4; border-radius: 50%; display: inline-block; animation: blink-pink 1.5s infinite; }
609
+ @keyframes blink-pink { 0% { opacity: 1; } 50% { opacity: 0.4; } 100% { opacity: 1; } }
610
+ .gr-form .gr-block label span, .gr-box label span, .gr-panel label span { background: linear-gradient(135deg, #ff69b4 0%, #ff1493 100%) !important; color: white !important; padding: 4px 12px !important; border-radius: 8px !important; font-weight: 600 !important; box-shadow: 0 0 15px rgba(255, 105, 180, 0.4) !important; }
611
+ input[type="range"] { accent-color: #ff69b4 !important; }
612
+ .char-scroll-box { display: grid !important; grid-template-columns: repeat(2, 1fr) !important; gap: 12px !important; max-height: 280px; overflow-y: auto; padding: 15px; background: #ffffff; border: 2px solid #ffeef4; border-radius: 14px; }
613
+ .char-card { background: white; padding: 12px; border-radius: 12px; cursor: pointer; border: 1px solid #ffe4ec; border-left: 5px solid #ff69b4; transition: all 0.2s ease; display: flex; flex-direction: column; height: 65px; }
614
+ .char-card:hover { transform: translateY(-3px); box-shadow: 0 5px 15px rgba(255, 105, 180, 0.2); border-left-color: #ff1493; }
615
+ .char-name-jp { font-weight: 700; font-size: 11px !important; color: #7b4d5a; }
616
+ .char-name-en { font-size: 8.5px !important; color: #b07d8b; text-transform: uppercase; }
617
+ .speed-section { margin-top: 20px; padding: 18px; border-radius: 20px; background: linear-gradient(135deg, #fff0f7 0%, #ffffff 100%); border: 2px solid #ffe4ec; }
618
+ .speed-title { font-family: 'Quicksand', sans-serif; font-weight: 700; color: #ff69b4; text-align: center; margin-bottom: 12px; font-size: 14px; }
619
+ .generate-btn { font-family: 'Quicksand', sans-serif; font-weight: 700 !important; background: linear-gradient(135deg, #ff69b4 0%, #ff1493 100%) !important; color: white !important; border-radius: 12px !important; padding: 12px 24px !important; transition: all 0.3s ease !important; }
620
+ .generate-btn:hover { transform: scale(1.05); box-shadow: 0 5px 20px rgba(255, 20, 147, 0.3) !important; }
621
+ .footer-text { text-align: center; padding: 20px; border-top: 1px solid #f8f0f4; color: #b07d8b; font-size: 11px; }
622
+ .speed-notes-box { font-family: 'Arial'; border: 1px solid #ffd1dc; border-radius: 8px; padding: 12px; background: #fff5f8; border-left: 4px solid #ff69b4; margin-top: 10px; }
623
+ .speed-notes-title { color: #ff1493; font-size: 12px; margin: 0 0 5px 0; font-weight: bold; }
624
+ .speed-notes-content { color: #d81b60; font-size: 11px; margin: 0; }
625
+ .model-tab { background: linear-gradient(135deg, #fff8fb 0%, #ffffff 100%) !important; border-radius: 15px !important; padding: 15px !important; }
626
+ .advanced-settings { background: #f9f9f9 !important; border-radius: 10px !important; padding: 15px !important; border: 1px solid #e0e0e0 !important; }
627
+ .error-box { background: #ffebee; border: 1px solid #ffcdd2; border-radius: 8px; padding: 15px; margin: 10px 0; color: #c62828; }
628
+ .info-box { background: #fce4ec; border: 1px solid #f8bbd9; border-radius: 8px; padding: 15px; margin: 10px 0; color: #ad1457; }
629
+ """
630
+
631
+ if __name__ == '__main__':
632
+ # Preload HuBERT
633
+ load_hubert()
634
+
635
+ # Load models
636
+ categories = load_model()
637
+ total_models = sum(len(models) for _, _, _, models in categories)
638
+
639
+ # Gradio UI
640
+ with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue="pink")) as app:
641
+ gr.HTML('<div class="header-img-container"><img src="https://huggingface.co/spaces/Library-Anime/Bocchi-the-Rock/resolve/main/Bocchi-the-Rock.PNG" class="header-img"></div>')
642
+
643
+ # Status card
644
+ if total_models > 0:
645
+ gr.HTML(f'''
646
+ <div class="status-card">
647
+ <div class="status-online-box">
648
+ <span class="dot-online"></span>
649
+ <b style="color: #ff69b4; font-size: 14px;">Voice Conversion System Online</b>
650
+ </div>
651
+ <div class="status-details-container">
652
+ <div class="status-detail-item">
653
+ <span class="status-text-main">👥 {total_models} Students</span>
654
+ <span class="status-text-sub">Ready for Conversion</span>
655
+ </div>
656
+ <div class="status-detail-item">
657
+ <span class="status-text-main">📊 Total Models</span>
658
+ <span class="status-text-sub">Database: {total_models}</span>
659
+ </div>
660
+ </div>
661
+ </div>
662
+ ''')
663
+ else:
664
+ gr.HTML(f'''
665
+ <div class="error-box">
666
+ <h3>⚠️ No Models Loaded</h3>
667
+ <p>Please check console logs for details.</p>
668
+ <p>Download from: <a href="https://huggingface.co/Plana-Archive/Anime-RCV" target="_blank">https://huggingface.co/Plana-Archive/Anime-RCV</a></p>
669
+ </div>
670
+ ''')
671
+
672
+ # One tab per category
673
+ if categories:
674
+ for cat_idx, (folder_title, folder, description, models) in enumerate(categories):
675
+ with gr.TabItem(folder_title, elem_classes="model-tab"):
676
+ with gr.Accordion("📑 Select Character Voice", open=True):
677
+ char_html = "".join([
678
+ f'<div class="char-card" onclick="selectModel(\'{folder_title}\', \'{name}\')">'
679
+ f'<span class="char-name-jp">{clean_title(title)}</span>'
680
+ f'<span class="char-name-en">{name}</span>'
681
+ f'</div>'
682
+ for name, title, author, cover, version, vc_fn in models
683
+ ])
684
+ gr.HTML(f'<div class="char-scroll-box">{char_html}</div>')
685
+
686
+ # One tab per model
687
+ with gr.Tabs():
688
+ for model_idx, (name, title, author, cover, model_version, vc_fn) in enumerate(models):
689
+ with gr.TabItem(name, id=f"model_{cat_idx}_{model_idx}"):
690
+ with gr.Row():
691
+ # Left column: model info
692
+ with gr.Column(scale=1):
693
+ gr.HTML(f'''
694
+ <div style="display:flex;flex-direction:column;align-items:center;padding:20px;background:white;border-radius:20px;border:1px solid #ffeef4;">
695
+ <img style="width:200px;height:260px;object-fit:cover;border-radius:15px;" src="file/{cover}">
696
+ <div style="font-family:'Quicksand',sans-serif;font-weight:700;font-size:18px;color:#ff1493;margin-top:15px;">
697
+ {clean_title(title)}
698
+ </div>
699
+ <div style="font-size:11px;color:#b07d8b;margin-top:5px;">
700
+ {model_version} • {author}
701
+ </div>
702
+ </div>
703
+ ''')
704
+
705
+ # Middle column: input and settings
706
+ with gr.Column(scale=2):
707
+ # Input group
708
+ with gr.Group():
709
+ vc_audio_mode = gr.Dropdown(
710
+ label="Input Mode",
711
+ choices=audio_mode,
712
+ value="TTS Audio"
713
+ )
714
+ vc_input = gr.Textbox(visible=False)
715
+ vc_microphone_mode = gr.Checkbox(
716
+ label="Use Microphone",
717
+ value=False
718
+ )
719
+ vc_upload = gr.Audio(
720
+ label="Upload Audio Source",
721
+ source="upload",
722
+ visible=False,
723
+ type="numpy"
724
+ )
725
+ tts_text = gr.Textbox(
726
+ label="TTS Text",
727
+ visible=True,
728
+ placeholder="Type your message here...",
729
+ lines=4
730
+ )
731
+
732
+ # Basic settings
733
+ with gr.Row():
734
+ with gr.Column():
735
+ vc_transform0 = gr.Slider(
736
+ minimum=-12,
737
+ maximum=12,
738
+ label="Pitch",
739
+ value=12,
740
+ step=1
741
+ )
742
+ f0method0 = gr.Radio(
743
+ label="Conversion Algorithm",
744
+ choices=f0method_mode,
745
+ value="rmvpe" if "rmvpe" in f0method_mode else "pm"
746
+ )
747
+ with gr.Column():
748
+ with gr.Accordion("⚙️ Advanced Tuning", open=True, elem_classes="advanced-settings"):
749
+ index_rate1 = gr.Slider(
750
+ 0, 1,
751
+ label="Index Rate",
752
+ value=0.75
753
+ )
754
+ filter_radius0 = gr.Slider(
755
+ 0, 7,
756
+ label="Filter Radius",
757
+ value=7,
758
+ step=1
759
+ )
760
+ resample_sr0 = gr.Slider(
761
+ 0, 48000,
762
+ label="Resample SR",
763
+ value=0
764
+ )
765
+ rms_mix_rate0 = gr.Slider(
766
+ 0, 1,
767
+ label="Volume Mix",
768
+ value=0.76
769
+ )
770
+ protect0 = gr.Slider(
771
+ 0, 0.5,
772
+ label="Voice Protect",
773
+ value=0.33
774
+ )
775
+
776
+ # Notes
777
+ with gr.Row():
778
+ with gr.Column():
779
+ gr.HTML("""
780
+ <div style="font-family: 'Arial'; border: 1px solid #ffd1e0; border-radius: 8px; padding: 12px; background: #fff5f9; border-left: 4px solid #ff69b4; margin-bottom: 8px;">
781
+ <h4 style="color: #ff1493; font-size: 13px; margin: 0 0 5px 0;">📝 Notes & Guide</h4>
782
+ <p style="color: #d81b60; font-size: 11px; margin: 0 0 3px 0;"><b>Pitch:</b> Adjust voice pitch</p>
783
+ <p style="color: #d81b60; font-size: 11px; margin: 0 0 3px 0;"><b>Algorithm:</b> F0 extraction method</p>
784
+ <p style="color: #d81b60; font-size: 11px; margin: 0 0 3px 0;"><b>Retrieval:</b> Voice similarity (0-1)</p>
785
+ <p style="color: #d81b60; font-size: 11px; margin: 0 0 3px 0;"><b>Filter:</b> Noise reduction</p>
786
+ <p style="color: #d81b60; font-size: 11px; margin: 0 0 3px 0;"><b>Volume:</b> Volume stability</p>
787
+ <p style="color: #d81b60; font-size: 11px; margin: 0;"><b>Protect:</b> Protect voice</p>
788
+ </div>
789
+ """)
790
+ with gr.Column():
791
+ gr.HTML("""
792
+ <div style="font-family: 'Arial'; border: 1px solid #ffd6e7; border-radius: 8px; padding: 12px; background: #fff0f7; border-left: 4px solid #ff69b4;">
793
+ <h4 style="color: #ff1493; font-size: 13px; margin: 0 0 5px 0;">📑 RECOMMENDED</h4>
794
+ <p style="color: #d81b60; font-size: 11px; margin: 0 0 3px 0;"><b>Pitch:</b> <span style="color: #ff1493; font-weight: bold;">+12</span></p>
795
+ <p style="color: #d81b60; font-size: 11px; margin: 0 0 3px 0;"><b>Algorithm:</b> <span style="color: #ff1493; font-weight: bold;">RMVPE</span></p>
796
+ <p style="color: #d81b60; font-size: 11px; margin: 0 0 3px 0;"><b>Retrieval:</b> <span style="color: #ff1493; font-weight: bold;">0.75</span></p>
797
+ <p style="color: #d81b60; font-size: 11px; margin: 0 0 3px 0;"><b>Filter:</b> <span style="color: #ff1493; font-weight: bold;">7</span></p>
798
+ <p style="color: #d81b60; font-size: 11px; margin: 0 0 3px 0;"><b>Volume:</b> <span style="color: #ff1493; font-weight: bold;">0.76</span></p>
799
+ <p style="color: #d81b60; font-size: 11px; margin: 0;"><b>Protect:</b> <span style="color: #ff1493; font-weight: bold;">0.33</span></p>
800
+ </div>
801
+ """)
802
+
803
+ # Speed section
804
+ with gr.Column(elem_classes="speed-section"):
805
+ gr.HTML('<div class="speed-title">⚡ VOICE SPEED CONTROL ⚡</div>')
806
+ speed_slider = gr.Slider(
807
+ 0.5, 2.0,
808
+ value=1.0,
809
+ step=0.1,
810
+ label="Speed"
811
+ )
812
+
813
+ gr.HTML("""
814
+ <div class="speed-notes-box">
815
+ <div class="speed-notes-title">ℹ️ Speed Guide</div>
816
+ <div class="speed-notes-content">
817
+ • <b>Left (0.5):</b> Slow down voice<br>
818
+ • <b>Center (1.0):</b> Normal speed<br>
819
+ • <b>Right (2.0):</b> Speed up voice<br>
820
+ </div>
821
+ </div>
822
+ """)
823
+
824
+ # Loading indicator
825
+ gr.HTML(
826
+ '<div class="arona-loading-container">'
827
+ '<div class="loading-text-pink">Ready to Generate!</div>'
828
+ '<img class="loading-gif-small" src="https://huggingface.co/spaces/Library-Anime/Bocchi-the-Rock/resolve/main/Bocchi Chan.gif">'
829
+ '</div>'
830
+ )
831
+
832
+ # Right column: output
833
+ with gr.Column(scale=1):
834
+ vc_log = gr.Textbox(
835
+ label="Process Logs",
836
+ interactive=False,
837
+ lines=4
838
+ )
839
+ vc_output = gr.Audio(
840
+ label="Result Audio",
841
+ interactive=False,
842
+ type="numpy"
843
+ )
844
+ vc_convert = gr.Button(
845
+ "🎸 GENERATE VOICE 🎸",
846
+ variant="primary",
847
+ elem_classes="generate-btn",
848
+ size="lg"
849
+ )
850
+
851
+ # Connect button click
852
+ vc_convert.click(
853
+ fn=vc_fn,
854
+ inputs=[
855
+ vc_audio_mode, vc_input, vc_upload, tts_text,
856
+ vc_transform0, f0method0, index_rate1, filter_radius0,
857
+ resample_sr0, rms_mix_rate0, protect0, speed_slider
858
+ ],
859
+ outputs=[vc_log, vc_output]
860
+ )
861
+
862
+ # Connect audio mode change
863
+ vc_audio_mode.change(
864
+ fn=change_audio_mode,
865
+ inputs=[vc_audio_mode],
866
+ outputs=[vc_input, vc_microphone_mode, vc_upload, tts_text]
867
+ )
868
+
869
+ # Connect microphone toggle
870
+ vc_microphone_mode.change(
871
+ fn=use_microphone,
872
+ inputs=vc_microphone_mode,
873
+ outputs=vc_upload
874
+ )
875
+
876
+ # Footer
877
+ gr.HTML(
878
+ '<div class="footer-text">'
879
+ '<div>DESIGNED BY ☘️Mutsumi-Chan☘️</div>'
880
+ '<div style="font-weight:700; color:#b07d8b;">Bocchi the Rock - RCV v1.0 • Pink Edition</div>'
881
+ '</div>'
882
+ )
883
+
884
+ # JavaScript for model selection
885
+ app.load(
886
+ None, None, None,
887
+ js="""
888
+ () => {
889
+ window.selectModel = (cat, mod) => {
890
+ const tabs = document.querySelectorAll('.tabs .tab-nav button');
891
+ for (let t of tabs) {
892
+ if (t.textContent.trim() === cat) {
893
+ t.click();
894
+ setTimeout(() => {
895
+ const mTabs = document.querySelectorAll('.tabs .tab-nav button');
896
+ for (let mt of mTabs) {
897
+ if (mt.textContent.trim() === mod) {
898
+ mt.click();
899
+ window.scrollTo({top: 0, behavior: 'smooth'});
900
+ }
901
+ }
902
+ }, 100);
903
+ break;
904
+ }
905
+ }
906
+ }
907
+ }
908
+ """
909
+ )
910
+
911
+ # Launch app
912
+ print("\n" + "=" * 50)
913
+ print("🌐 STARTING WEB INTERFACE")
914
+ print("=" * 50)
915
+
916
+ app.queue(max_size=3).launch(
917
+ share=False,
918
+ server_name="0.0.0.0" if os.getenv('SPACE_ID') else "127.0.0.1",
919
+ server_port=7860,
920
+ quiet=False,
921
+ show_error=True
922
+ )
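
For reference, the model_info.json entries that load_model() reads and update_model_info_with_actual_files() writes have this shape. Field names are taken from the code above; the character folder and file names here are illustrative placeholders only:

    model_info = {
        "Hitori-Gotoh": {                       # one folder per character under weights/Bocchi-the-Rock/
            "enable": True,
            "model_path": "Hitori-Gotoh.pth",
            "title": "Bocchi the Rock! - Hitori Gotoh",
            "cover": "cover.png",
            "feature_retrieval_library": "Hitori-Gotoh.index",
            "author": "Plana-Archive",
        }
    }
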
Bocchi-the-Rock/config.py ADDED
@@ -0,0 +1,28 @@
1
+ import torch
2
+
3
+ class Config:
4
+ def __init__(self):
5
+ self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
6
+ self.is_half = self.device != "cpu"
7
+ self.n_cpu = 0
8
+ self.gpu_name = None
9
+ self.gpu_mem = None
10
+ self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
11
+
12
+ def device_config(self) -> tuple:
13
+ if torch.cuda.is_available():
14
+ i_device = int(self.device.split(":")[-1])
15
+ self.gpu_name = torch.cuda.get_device_name(i_device)
16
+ if ("16" in self.gpu_name and "V100" not in self.gpu_name):
17
+ print("16-series GPU detected, forcing full precision (half-precision not supported).")
18
+ self.is_half = False
19
+ self.gpu_mem = int(torch.cuda.get_device_properties(i_device).total_memory / 1024 / 1024 / 1024 + 0.4)
20
+ if self.gpu_mem <= 4:
21
+ x_pad, x_query, x_center, x_max = 1, 5, 5, 8
22
+ elif self.gpu_mem <= 5:
23
+ x_pad, x_query, x_center, x_max = 1, 6, 6, 8
24
+ else:
25
+ x_pad, x_query, x_center, x_max = 3, 8, 8, 12
26
+ else:
27
+ x_pad, x_query, x_center, x_max = 1, 5, 5, 8
28
+ return x_pad, x_query, x_center, x_max
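A minimal usage sketch for the Config class above; how the rest of the pipeline consumes these fields is an assumption based on the attributes it exposes, and the import assumes you run from the Bocchi-the-Rock directory:

    from config import Config

    config = Config()
    print(config.device, config.is_half)   # e.g. "cuda:0" True
    # Padding/window sizes chosen by device_config() from available GPU memory:
    print(config.x_pad, config.x_query, config.x_center, config.x_max)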
Bocchi-the-Rock/edgetts_db.py ADDED
@@ -0,0 +1,232 @@
1
+ tts_order_voice = {
2
+ 'English-Jenny (Female)': 'en-US-JennyNeural',
3
+ 'English-Guy (Male)': 'en-US-GuyNeural',
4
+ 'English-Ana (Female)': 'en-US-AnaNeural',
5
+ 'English-Aria (Female)': 'en-US-AriaNeural',
6
+ 'English-Christopher (Male)': 'en-US-ChristopherNeural',
7
+ 'English-Eric (Male)': 'en-US-EricNeural',
8
+ 'English-Michelle (Female)': 'en-US-MichelleNeural',
9
+ 'English-Roger (Male)': 'en-US-RogerNeural',
10
+ 'Spanish (Mexican)-Dalia (Female)': 'es-MX-DaliaNeural',
11
+ 'Spanish (Mexican)-Jorge- (Male)': 'es-MX-JorgeNeural',
12
+ 'Korean-Sun-Hi- (Female)': 'ko-KR-SunHiNeural',
13
+ 'Korean-InJoon- (Male)': 'ko-KR-InJoonNeural',
14
+ 'Thai-Premwadee- (Female)': 'th-TH-PremwadeeNeural',
15
+ 'Thai-Niwat- (Male)': 'th-TH-NiwatNeural',
16
+ 'Vietnamese-HoaiMy- (Female)': 'vi-VN-HoaiMyNeural',
17
+ 'Vietnamese-NamMinh- (Male)': 'vi-VN-NamMinhNeural',
18
+ 'Japanese-Nanami- (Female)': 'ja-JP-NanamiNeural',
19
+ 'Japanese-Keita- (Male)': 'ja-JP-KeitaNeural',
20
+ 'French-Denise- (Female)': 'fr-FR-DeniseNeural',
21
+ 'French-Eloise- (Female)': 'fr-FR-EloiseNeural',
22
+ 'French-Henri- (Male)': 'fr-FR-HenriNeural',
23
+ 'Brazilian-Francisca- (Female)': 'pt-BR-FranciscaNeural',
24
+ 'Brazilian-Antonio- (Male)': 'pt-BR-AntonioNeural',
25
+ 'Indonesian-Ardi- (Male)': 'id-ID-ArdiNeural',
26
+ 'Indonesian-Gadis- (Female)': 'id-ID-GadisNeural',
27
+ 'Hebrew-Avri- (Male)': 'he-IL-AvriNeural',
28
+ 'Hebrew-Hila- (Female)': 'he-IL-HilaNeural',
29
+ 'Italian-Isabella- (Female)': 'it-IT-IsabellaNeural',
30
+ 'Italian-Diego- (Male)': 'it-IT-DiegoNeural',
31
+ 'Italian-Elsa- (Female)': 'it-IT-ElsaNeural',
32
+ 'Dutch-Colette- (Female)': 'nl-NL-ColetteNeural',
33
+ 'Dutch-Fenna- (Female)': 'nl-NL-FennaNeural',
34
+ 'Dutch-Maarten- (Male)': 'nl-NL-MaartenNeural',
35
+ 'Malay-Osman- (Male)': 'ms-MY-OsmanNeural',
36
+ 'Malay-Yasmin- (Female)': 'ms-MY-YasminNeural',
37
+ 'Norwegian-Pernille- (Female)': 'nb-NO-PernilleNeural',
38
+ 'Norwegian-Finn- (Male)': 'nb-NO-FinnNeural',
39
+ 'Swedish-Sofie- (Female)': 'sv-SE-SofieNeural',
40
+ 'Swedish-Mattias- (Male)': 'sv-SE-MattiasNeural',
41
+ 'Arabic-Hamed- (Male)': 'ar-SA-HamedNeural',
42
+ 'Arabic-Zariyah- (Female)': 'ar-SA-ZariyahNeural',
43
+ 'Greek-Athina- (Female)': 'el-GR-AthinaNeural',
44
+ 'Greek-Nestoras- (Male)': 'el-GR-NestorasNeural',
45
+ 'German-Katja- (Female)': 'de-DE-KatjaNeural',
46
+ 'German-Amala- (Female)': 'de-DE-AmalaNeural',
47
+ 'German-Conrad- (Male)': 'de-DE-ConradNeural',
48
+ 'German-Killian- (Male)': 'de-DE-KillianNeural',
49
+ 'Afrikaans-Adri- (Female)': 'af-ZA-AdriNeural',
50
+ 'Afrikaans-Willem- (Male)': 'af-ZA-WillemNeural',
51
+ 'Ethiopian-Ameha- (Male)': 'am-ET-AmehaNeural',
52
+ 'Ethiopian-Mekdes- (Female)': 'am-ET-MekdesNeural',
53
+ 'Arabic (UAD)-Fatima- (Female)': 'ar-AE-FatimaNeural',
54
+ 'Arabic (UAD)-Hamdan- (Male)': 'ar-AE-HamdanNeural',
55
+ 'Arabic (Bahrain)-Ali- (Male)': 'ar-BH-AliNeural',
56
+ 'Arabic (Bahrain)-Laila- (Female)': 'ar-BH-LailaNeural',
57
+ 'Arabic (Algeria)-Ismael- (Male)': 'ar-DZ-IsmaelNeural',
58
+ 'Arabic (Egypt)-Salma- (Female)': 'ar-EG-SalmaNeural',
59
+ 'Arabic (Egypt)-Shakir- (Male)': 'ar-EG-ShakirNeural',
60
+ 'Arabic (Iraq)-Bassel- (Male)': 'ar-IQ-BasselNeural',
61
+ 'Arabic (Iraq)-Rana- (Female)': 'ar-IQ-RanaNeural',
62
+ 'Arabic (Jordan)-Sana- (Female)': 'ar-JO-SanaNeural',
63
+ 'Arabic (Jordan)-Taim- (Male)': 'ar-JO-TaimNeural',
64
+ 'Arabic (Kuwait)-Fahed- (Male)': 'ar-KW-FahedNeural',
65
+ 'Arabic (Kuwait)-Noura- (Female)': 'ar-KW-NouraNeural',
66
+ 'Arabic (Lebanon)-Layla- (Female)': 'ar-LB-LaylaNeural',
67
+ 'Arabic (Lebanon)-Rami- (Male)': 'ar-LB-RamiNeural',
68
+ 'Arabic (Libya)-Iman- (Female)': 'ar-LY-ImanNeural',
69
+ 'Arabic (Libya)-Omar- (Male)': 'ar-LY-OmarNeural',
70
+ 'Arabic (Morocco)-Jamal- (Male)': 'ar-MA-JamalNeural',
71
+ 'Arabic (Morocco)-Mouna- (Female)': 'ar-MA-MounaNeural',
72
+ 'Arabic (Oman)-Abdullah- (Male)': 'ar-OM-AbdullahNeural',
73
+ 'Arabic (Oman)-Aysha- (Female)': 'ar-OM-AyshaNeural',
74
+ 'Arabic (Qatar)-Amal- (Female)': 'ar-QA-AmalNeural',
75
+ 'Arabic (Qatar)-Moaz- (Male)': 'ar-QA-MoazNeural',
76
+ 'Arabic (Syrian Arab Republic)-Amany- (Female)': 'ar-SY-AmanyNeural',
77
+ 'Arabic (Syrian Arab Republic)-Laith- (Male)': 'ar-SY-LaithNeural',
78
+ 'Arabic (Tunisia)-Hedi- (Male)': 'ar-TN-HediNeural',
79
+ 'Arabic (Tunisia)-Reem- (Female)': 'ar-TN-ReemNeural',
80
+ 'Arabic (Yemen)-Maryam- (Female)': 'ar-YE-MaryamNeural',
81
+ 'Arabic (Yemen)-Saleh- (Male)': 'ar-YE-SalehNeural',
82
+ 'Azerbaijani-Babek- (Male)': 'az-AZ-BabekNeural',
83
+ 'Azerbaijani-Banu- (Female)': 'az-AZ-BanuNeural',
84
+ 'Bulgarian-Borislav- (Male)': 'bg-BG-BorislavNeural',
85
+ 'Bulgarian-Kalina- (Female)': 'bg-BG-KalinaNeural',
86
+ 'Bengali (Bangladesh)-Nabanita- (Female)': 'bn-BD-NabanitaNeural',
87
+ 'Bengali (Bangladesh)-Pradeep- (Male)': 'bn-BD-PradeepNeural',
88
+ 'Bengali (India)-Bashkar- (Male)': 'bn-IN-BashkarNeural',
89
+ 'Bengali (India)-Tanishaa- (Female)': 'bn-IN-TanishaaNeural',
90
+ 'Bosniak (Bosnia and Herzegovina)-Goran- (Male)': 'bs-BA-GoranNeural',
91
+ 'Bosniak (Bosnia and Herzegovina)-Vesna- (Female)': 'bs-BA-VesnaNeural',
92
+ 'Catalan (Spain)-Joana- (Female)': 'ca-ES-JoanaNeural',
93
+ 'Catalan (Spain)-Enric- (Male)': 'ca-ES-EnricNeural',
94
+ 'Czech (Czech Republic)-Antonin- (Male)': 'cs-CZ-AntoninNeural',
95
+ 'Czech (Czech Republic)-Vlasta- (Female)': 'cs-CZ-VlastaNeural',
96
+ 'Welsh (UK)-Aled- (Male)': 'cy-GB-AledNeural',
97
+ 'Welsh (UK)-Nia- (Female)': 'cy-GB-NiaNeural',
98
+ 'Danish (Denmark)-Christel- (Female)': 'da-DK-ChristelNeural',
99
+ 'Danish (Denmark)-Jeppe- (Male)': 'da-DK-JeppeNeural',
100
+ 'German (Austria)-Ingrid- (Female)': 'de-AT-IngridNeural',
101
+ 'German (Austria)-Jonas- (Male)': 'de-AT-JonasNeural',
102
+ 'German (Switzerland)-Jan- (Male)': 'de-CH-JanNeural',
103
+ 'German (Switzerland)-Leni- (Female)': 'de-CH-LeniNeural',
104
+ 'English (Australia)-Natasha- (Female)': 'en-AU-NatashaNeural',
105
+ 'English (Australia)-William- (Male)': 'en-AU-WilliamNeural',
106
+ 'English (Canada)-Clara- (Female)': 'en-CA-ClaraNeural',
107
+ 'English (Canada)-Liam- (Male)': 'en-CA-LiamNeural',
108
+ 'English (UK)-Libby- (Female)': 'en-GB-LibbyNeural',
109
+ 'English (UK)-Maisie- (Female)': 'en-GB-MaisieNeural',
110
+ 'English (UK)-Ryan- (Male)': 'en-GB-RyanNeural',
111
+ 'English (UK)-Sonia- (Female)': 'en-GB-SoniaNeural',
112
+ 'English (UK)-Thomas- (Male)': 'en-GB-ThomasNeural',
113
+ 'English (Hong Kong)-Sam- (Male)': 'en-HK-SamNeural',
114
+ 'English (Hong Kong)-Yan- (Female)': 'en-HK-YanNeural',
115
+ 'English (Ireland)-Connor- (Male)': 'en-IE-ConnorNeural',
116
+ 'English (Ireland)-Emily- (Female)': 'en-IE-EmilyNeural',
117
+ 'English (India)-Neerja- (Female)': 'en-IN-NeerjaNeural',
118
+ 'English (India)-Prabhat- (Male)': 'en-IN-PrabhatNeural',
119
+ 'English (Kenya)-Asilia- (Female)': 'en-KE-AsiliaNeural',
120
+ 'English (Kenya)-Chilemba- (Male)': 'en-KE-ChilembaNeural',
121
+ 'English (Nigeria)-Abeo- (Male)': 'en-NG-AbeoNeural',
122
+ 'English (Nigeria)-Ezinne- (Female)': 'en-NG-EzinneNeural',
123
+ 'English (New Zealand)-Mitchell- (Male)': 'en-NZ-MitchellNeural',
124
+ 'English (Philippines)-James- (Male)': 'en-PH-JamesNeural',
125
+ 'English (Philippines)-Rosa- (Female)': 'en-PH-RosaNeural',
126
+ 'English (Singapore)-Luna- (Female)': 'en-SG-LunaNeural',
127
+ 'English (Singapore)-Wayne- (Male)': 'en-SG-WayneNeural',
128
+ 'English (Tanzania)-Elimu- (Male)': 'en-TZ-ElimuNeural',
129
+ 'English (Tanzania)-Imani- (Female)': 'en-TZ-ImaniNeural',
130
+ 'English (South Africa)-Leah- (Female)': 'en-ZA-LeahNeural',
131
+ 'English (South Africa)-Luke- (Male)': 'en-ZA-LukeNeural',
132
+ 'Spanish (Argentina)-Elena- (Female)': 'es-AR-ElenaNeural',
133
+ 'Spanish (Argentina)-Tomas- (Male)': 'es-AR-TomasNeural',
134
+ 'Spanish (Bolivia)-Marcelo- (Male)': 'es-BO-MarceloNeural',
135
+ 'Spanish (Bolivia)-Sofia- (Female)': 'es-BO-SofiaNeural',
136
+ 'Spanish (Colombia)-Gonzalo- (Male)': 'es-CO-GonzaloNeural',
137
+ 'Spanish (Colombia)-Salome- (Female)': 'es-CO-SalomeNeural',
138
+ 'Spanish (Costa Rica)-Juan- (Male)': 'es-CR-JuanNeural',
139
+ 'Spanish (Costa Rica)-Maria- (Female)': 'es-CR-MariaNeural',
140
+ 'Spanish (Cuba)-Belkys- (Female)': 'es-CU-BelkysNeural',
141
+ 'Spanish (Dominican Republic)-Emilio- (Male)': 'es-DO-EmilioNeural',
142
+ 'Spanish (Dominican Republic)-Ramona- (Female)': 'es-DO-RamonaNeural',
143
+ 'Spanish (Ecuador)-Andrea- (Female)': 'es-EC-AndreaNeural',
144
+ 'Spanish (Ecuador)-Luis- (Male)': 'es-EC-LuisNeural',
145
+ 'Spanish (Spain)-Alvaro- (Male)': 'es-ES-AlvaroNeural',
146
+ 'Spanish (Spain)-Elvira- (Female)': 'es-ES-ElviraNeural',
147
+ 'Spanish (Equatorial Guinea)-Teresa- (Female)': 'es-GQ-TeresaNeural',
148
+ 'Spanish (Guatemala)-Andres- (Male)': 'es-GT-AndresNeural',
149
+ 'Spanish (Guatemala)-Marta- (Female)': 'es-GT-MartaNeural',
150
+ 'Spanish (Honduras)-Carlos- (Male)': 'es-HN-CarlosNeural',
151
+ 'Spanish (Honduras)-Karla- (Female)': 'es-HN-KarlaNeural',
152
+ 'Spanish (Nicaragua)-Federico- (Male)': 'es-NI-FedericoNeural',
153
+ 'Spanish (Nicaragua)-Yolanda- (Female)': 'es-NI-YolandaNeural',
154
+ 'Spanish (Panama)-Margarita- (Female)': 'es-PA-MargaritaNeural',
155
+ 'Spanish (Panama)-Roberto- (Male)': 'es-PA-RobertoNeural',
156
+ 'Spanish (Peru)-Alex- (Male)': 'es-PE-AlexNeural',
157
+ 'Spanish (Peru)-Camila- (Female)': 'es-PE-CamilaNeural',
158
+ 'Spanish (Puerto Rico)-Karina- (Female)': 'es-PR-KarinaNeural',
159
+ 'Spanish (Puerto Rico)-Victor- (Male)': 'es-PR-VictorNeural',
160
+ 'Spanish (Paraguay)-Mario- (Male)': 'es-PY-MarioNeural',
161
+ 'Spanish (Paraguay)-Tania- (Female)': 'es-PY-TaniaNeural',
162
+ 'Spanish (El Salvador)-Lorena- (Female)': 'es-SV-LorenaNeural',
163
+ 'Spanish (El Salvador)-Rodrigo- (Male)': 'es-SV-RodrigoNeural',
164
+ 'Spanish (United States)-Alonso- (Male)': 'es-US-AlonsoNeural',
165
+ 'Spanish (United States)-Paloma- (Female)': 'es-US-PalomaNeural',
166
+ 'Spanish (Uruguay)-Mateo- (Male)': 'es-UY-MateoNeural',
167
+ 'Spanish (Uruguay)-Valentina- (Female)': 'es-UY-ValentinaNeural',
168
+ 'Spanish (Venezuela)-Paola- (Female)': 'es-VE-PaolaNeural',
169
+ 'Spanish (Venezuela)-Sebastian- (Male)': 'es-VE-SebastianNeural',
170
+ 'Estonian (Estonia)-Anu- (Female)': 'et-EE-AnuNeural',
171
+ 'Estonian (Estonia)-Kert- (Male)': 'et-EE-KertNeural',
172
+ 'Persian (Iran)-Dilara- (Female)': 'fa-IR-DilaraNeural',
173
+ 'Persian (Iran)-Farid- (Male)': 'fa-IR-FaridNeural',
174
+ 'Finnish (Finland)-Harri- (Male)': 'fi-FI-HarriNeural',
175
+ 'Finnish (Finland)-Noora- (Female)': 'fi-FI-NooraNeural',
176
+ 'French (Belgium)-Charline- (Female)': 'fr-BE-CharlineNeural',
177
+ 'French (Belgium)-Gerard- (Male)': 'fr-BE-GerardNeural',
178
+ 'French (Canada)-Sylvie- (Female)': 'fr-CA-SylvieNeural',
179
+ 'French (Canada)-Antoine- (Male)': 'fr-CA-AntoineNeural',
180
+ 'French (Canada)-Jean- (Male)': 'fr-CA-JeanNeural',
181
+ 'French (Switzerland)-Ariane- (Female)': 'fr-CH-ArianeNeural',
182
+ 'French (Switzerland)-Fabrice- (Male)': 'fr-CH-FabriceNeural',
183
+ 'Irish (Ireland)-Colm- (Male)': 'ga-IE-ColmNeural',
184
+ 'Irish (Ireland)-Orla- (Female)': 'ga-IE-OrlaNeural',
185
+ 'Galician (Spain)-Roi- (Male)': 'gl-ES-RoiNeural',
186
+ 'Galician (Spain)-Sabela- (Female)': 'gl-ES-SabelaNeural',
187
+ 'Gujarati (India)-Dhwani- (Female)': 'gu-IN-DhwaniNeural',
188
+ 'Gujarati (India)-Niranjan- (Male)': 'gu-IN-NiranjanNeural',
189
+ 'Hindi (India)-Madhur- (Male)': 'hi-IN-MadhurNeural',
190
+ 'Hindi (India)-Swara- (Female)': 'hi-IN-SwaraNeural',
191
+ 'Croatian (Croatia)-Gabrijela- (Female)': 'hr-HR-GabrijelaNeural',
192
+ 'Croatian (Croatia)-Srecko- (Male)': 'hr-HR-SreckoNeural',
193
+ 'Hungarian (Hungary)-Noemi- (Female)': 'hu-HU-NoemiNeural',
194
+ 'Hungarian (Hungary)-Tamas- (Male)': 'hu-HU-TamasNeural',
195
+ 'Icelandic (Iceland)-Gudrun- (Female)': 'is-IS-GudrunNeural',
196
+ 'Icelandic (Iceland)-Gunnar- (Male)': 'is-IS-GunnarNeural',
197
+ 'Javanese (Indonesia)-Dimas- (Male)': 'jv-ID-DimasNeural',
198
+ 'Javanese (Indonesia)-Siti- (Female)': 'jv-ID-SitiNeural',
199
+ 'Georgian (Georgia)-Eka- (Female)': 'ka-GE-EkaNeural',
200
+ 'Georgian (Georgia)-Giorgi- (Male)': 'ka-GE-GiorgiNeural',
201
+ 'Kazakh (Kazakhstan)-Aigul- (Female)': 'kk-KZ-AigulNeural',
202
+ 'Kazakh (Kazakhstan)-Daulet- (Male)': 'kk-KZ-DauletNeural',
203
+ 'Khmer (Cambodia)-Piseth- (Male)': 'km-KH-PisethNeural',
204
+ 'Khmer (Cambodia)-Sreymom- (Female)': 'km-KH-SreymomNeural',
205
+ 'Kannada (India)-Gagan- (Male)': 'kn-IN-GaganNeural',
206
+ 'Kannada (India)-Sapna- (Female)': 'kn-IN-SapnaNeural',
207
+ 'Lao (Laos)-Chanthavong- (Male)': 'lo-LA-ChanthavongNeural',
208
+ 'Lao (Laos)-Keomany- (Female)': 'lo-LA-KeomanyNeural',
209
+ 'Lithuanian (Lithuania)-Leonas- (Male)': 'lt-LT-LeonasNeural',
210
+ 'Lithuanian (Lithuania)-Ona- (Female)': 'lt-LT-OnaNeural',
211
+ 'Latvian (Latvia)-Everita- (Female)': 'lv-LV-EveritaNeural',
212
+ 'Latvian (Latvia)-Nils- (Male)': 'lv-LV-NilsNeural',
213
+ 'Macedonian (North Macedonia)-Aleksandar- (Male)': 'mk-MK-AleksandarNeural',
214
+ 'Macedonian (North Macedonia)-Marija- (Female)': 'mk-MK-MarijaNeural',
215
+ 'Malayalam (India)-Midhun- (Male)': 'ml-IN-MidhunNeural',
216
+ 'Malayalam (India)-Sobhana- (Female)': 'ml-IN-SobhanaNeural',
217
+ 'Mongolian (Mongolia)-Bataa- (Male)': 'mn-MN-BataaNeural',
218
+ 'Mongolian (Mongolia)-Yesui- (Female)': 'mn-MN-YesuiNeural',
219
+ 'Marathi (India)-Aarohi- (Female)': 'mr-IN-AarohiNeural',
220
+ 'Marathi (India)-Manohar- (Male)': 'mr-IN-ManoharNeural',
221
+ 'Maltese (Malta)-Grace- (Female)': 'mt-MT-GraceNeural',
222
+ 'Maltese (Malta)-Joseph- (Male)': 'mt-MT-JosephNeural',
223
+ 'Burmese (Myanmar)-Nilar- (Female)': 'my-MM-NilarNeural',
224
+ 'Burmese (Myanmar)-Thiha- (Male)': 'my-MM-ThihaNeural',
225
+ 'Nepali (Nepal)-Hemkala- (Female)': 'ne-NP-HemkalaNeural',
226
+ 'Nepali (Nepal)-Sagar- (Male)': 'ne-NP-SagarNeural',
227
+ 'Dutch (Belgium)-Arnaud- (Male)': 'nl-BE-ArnaudNeural',
228
+ 'Dutch (Belgium)-Dena- (Female)': 'nl-BE-DenaNeural',
229
+ 'Polish (Poland)-Marek- (Male)': 'pl-PL-MarekNeural',
230
+ 'Polish (Poland)-Zofia- (Female)': 'pl-PL-ZofiaNeural',
231
+ 'Pashto (Afghanistan)-Gul Nawaz- (Male)': 'ps-AF-GulNawazNeural',
232
+ }
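A short, hedged usage sketch for the mapping above: look up a display name to get the Microsoft voice short name, then synthesize with the edge-tts package (the output path and example text are illustrative):

    import asyncio
    import edge_tts
    from edgetts_db import tts_order_voice

    async def synth(display_name: str, text: str, out_path: str = "tts.mp3") -> None:
        voice = tts_order_voice[display_name]   # e.g. 'ja-JP-NanamiNeural'
        await edge_tts.Communicate(text, voice).save(out_path)

    asyncio.run(synth('Japanese-Nanami- (Female)', 'Hello from Bocchi!'))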
Bocchi-the-Rock/hubert_base.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f54b40fd2802423a5643779c4861af1e9ee9c1564dc9d32f54f20b5ffba7db96
3
+ size 189507909
Bocchi-the-Rock/lib/infer_pack/attentions.py ADDED
@@ -0,0 +1,417 @@
1
+ import copy
2
+ import math
3
+ import numpy as np
4
+ import torch
5
+ from torch import nn
6
+ from torch.nn import functional as F
7
+
8
+ from lib.infer_pack import commons
9
+ from lib.infer_pack import modules
10
+ from lib.infer_pack.modules import LayerNorm
11
+
12
+
13
+ class Encoder(nn.Module):
14
+ def __init__(
15
+ self,
16
+ hidden_channels,
17
+ filter_channels,
18
+ n_heads,
19
+ n_layers,
20
+ kernel_size=1,
21
+ p_dropout=0.0,
22
+ window_size=10,
23
+ **kwargs
24
+ ):
25
+ super().__init__()
26
+ self.hidden_channels = hidden_channels
27
+ self.filter_channels = filter_channels
28
+ self.n_heads = n_heads
29
+ self.n_layers = n_layers
30
+ self.kernel_size = kernel_size
31
+ self.p_dropout = p_dropout
32
+ self.window_size = window_size
33
+
34
+ self.drop = nn.Dropout(p_dropout)
35
+ self.attn_layers = nn.ModuleList()
36
+ self.norm_layers_1 = nn.ModuleList()
37
+ self.ffn_layers = nn.ModuleList()
38
+ self.norm_layers_2 = nn.ModuleList()
39
+ for i in range(self.n_layers):
40
+ self.attn_layers.append(
41
+ MultiHeadAttention(
42
+ hidden_channels,
43
+ hidden_channels,
44
+ n_heads,
45
+ p_dropout=p_dropout,
46
+ window_size=window_size,
47
+ )
48
+ )
49
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
50
+ self.ffn_layers.append(
51
+ FFN(
52
+ hidden_channels,
53
+ hidden_channels,
54
+ filter_channels,
55
+ kernel_size,
56
+ p_dropout=p_dropout,
57
+ )
58
+ )
59
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
60
+
61
+ def forward(self, x, x_mask):
62
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
63
+ x = x * x_mask
64
+ for i in range(self.n_layers):
65
+ y = self.attn_layers[i](x, x, attn_mask)
66
+ y = self.drop(y)
67
+ x = self.norm_layers_1[i](x + y)
68
+
69
+ y = self.ffn_layers[i](x, x_mask)
70
+ y = self.drop(y)
71
+ x = self.norm_layers_2[i](x + y)
72
+ x = x * x_mask
73
+ return x
74
+
75
+
76
+ class Decoder(nn.Module):
77
+ def __init__(
78
+ self,
79
+ hidden_channels,
80
+ filter_channels,
81
+ n_heads,
82
+ n_layers,
83
+ kernel_size=1,
84
+ p_dropout=0.0,
85
+ proximal_bias=False,
86
+ proximal_init=True,
87
+ **kwargs
88
+ ):
89
+ super().__init__()
90
+ self.hidden_channels = hidden_channels
91
+ self.filter_channels = filter_channels
92
+ self.n_heads = n_heads
93
+ self.n_layers = n_layers
94
+ self.kernel_size = kernel_size
95
+ self.p_dropout = p_dropout
96
+ self.proximal_bias = proximal_bias
97
+ self.proximal_init = proximal_init
98
+
99
+ self.drop = nn.Dropout(p_dropout)
100
+ self.self_attn_layers = nn.ModuleList()
101
+ self.norm_layers_0 = nn.ModuleList()
102
+ self.encdec_attn_layers = nn.ModuleList()
103
+ self.norm_layers_1 = nn.ModuleList()
104
+ self.ffn_layers = nn.ModuleList()
105
+ self.norm_layers_2 = nn.ModuleList()
106
+ for i in range(self.n_layers):
107
+ self.self_attn_layers.append(
108
+ MultiHeadAttention(
109
+ hidden_channels,
110
+ hidden_channels,
111
+ n_heads,
112
+ p_dropout=p_dropout,
113
+ proximal_bias=proximal_bias,
114
+ proximal_init=proximal_init,
115
+ )
116
+ )
117
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
118
+ self.encdec_attn_layers.append(
119
+ MultiHeadAttention(
120
+ hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
121
+ )
122
+ )
123
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
124
+ self.ffn_layers.append(
125
+ FFN(
126
+ hidden_channels,
127
+ hidden_channels,
128
+ filter_channels,
129
+ kernel_size,
130
+ p_dropout=p_dropout,
131
+ causal=True,
132
+ )
133
+ )
134
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
135
+
136
+ def forward(self, x, x_mask, h, h_mask):
137
+ """
138
+ x: decoder input
139
+ h: encoder output
140
+ """
141
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
142
+ device=x.device, dtype=x.dtype
143
+ )
144
+ encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
145
+ x = x * x_mask
146
+ for i in range(self.n_layers):
147
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
148
+ y = self.drop(y)
149
+ x = self.norm_layers_0[i](x + y)
150
+
151
+ y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
152
+ y = self.drop(y)
153
+ x = self.norm_layers_1[i](x + y)
154
+
155
+ y = self.ffn_layers[i](x, x_mask)
156
+ y = self.drop(y)
157
+ x = self.norm_layers_2[i](x + y)
158
+ x = x * x_mask
159
+ return x
160
+
161
+
162
+ class MultiHeadAttention(nn.Module):
163
+ def __init__(
164
+ self,
165
+ channels,
166
+ out_channels,
167
+ n_heads,
168
+ p_dropout=0.0,
169
+ window_size=None,
170
+ heads_share=True,
171
+ block_length=None,
172
+ proximal_bias=False,
173
+ proximal_init=False,
174
+ ):
175
+ super().__init__()
176
+ assert channels % n_heads == 0
177
+
178
+ self.channels = channels
179
+ self.out_channels = out_channels
180
+ self.n_heads = n_heads
181
+ self.p_dropout = p_dropout
182
+ self.window_size = window_size
183
+ self.heads_share = heads_share
184
+ self.block_length = block_length
185
+ self.proximal_bias = proximal_bias
186
+ self.proximal_init = proximal_init
187
+ self.attn = None
188
+
189
+ self.k_channels = channels // n_heads
190
+ self.conv_q = nn.Conv1d(channels, channels, 1)
191
+ self.conv_k = nn.Conv1d(channels, channels, 1)
192
+ self.conv_v = nn.Conv1d(channels, channels, 1)
193
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
194
+ self.drop = nn.Dropout(p_dropout)
195
+
196
+ if window_size is not None:
197
+ n_heads_rel = 1 if heads_share else n_heads
198
+ rel_stddev = self.k_channels**-0.5
199
+ self.emb_rel_k = nn.Parameter(
200
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
201
+ * rel_stddev
202
+ )
203
+ self.emb_rel_v = nn.Parameter(
204
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
205
+ * rel_stddev
206
+ )
207
+
208
+ nn.init.xavier_uniform_(self.conv_q.weight)
209
+ nn.init.xavier_uniform_(self.conv_k.weight)
210
+ nn.init.xavier_uniform_(self.conv_v.weight)
211
+ if proximal_init:
212
+ with torch.no_grad():
213
+ self.conv_k.weight.copy_(self.conv_q.weight)
214
+ self.conv_k.bias.copy_(self.conv_q.bias)
215
+
216
+ def forward(self, x, c, attn_mask=None):
217
+ q = self.conv_q(x)
218
+ k = self.conv_k(c)
219
+ v = self.conv_v(c)
220
+
221
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
222
+
223
+ x = self.conv_o(x)
224
+ return x
225
+
226
+ def attention(self, query, key, value, mask=None):
227
+ # reshape [b, d, t] -> [b, n_h, t, d_k]
228
+ b, d, t_s, t_t = (*key.size(), query.size(2))
229
+ query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
230
+ key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
231
+ value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
232
+
233
+ scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
234
+ if self.window_size is not None:
235
+ assert (
236
+ t_s == t_t
237
+ ), "Relative attention is only available for self-attention."
238
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
239
+ rel_logits = self._matmul_with_relative_keys(
240
+ query / math.sqrt(self.k_channels), key_relative_embeddings
241
+ )
242
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
243
+ scores = scores + scores_local
244
+ if self.proximal_bias:
245
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
246
+ scores = scores + self._attention_bias_proximal(t_s).to(
247
+ device=scores.device, dtype=scores.dtype
248
+ )
249
+ if mask is not None:
250
+ scores = scores.masked_fill(mask == 0, -1e4)
251
+ if self.block_length is not None:
252
+ assert (
253
+ t_s == t_t
254
+ ), "Local attention is only available for self-attention."
255
+ block_mask = (
256
+ torch.ones_like(scores)
257
+ .triu(-self.block_length)
258
+ .tril(self.block_length)
259
+ )
260
+ scores = scores.masked_fill(block_mask == 0, -1e4)
261
+ p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
262
+ p_attn = self.drop(p_attn)
263
+ output = torch.matmul(p_attn, value)
264
+ if self.window_size is not None:
265
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
266
+ value_relative_embeddings = self._get_relative_embeddings(
267
+ self.emb_rel_v, t_s
268
+ )
269
+ output = output + self._matmul_with_relative_values(
270
+ relative_weights, value_relative_embeddings
271
+ )
272
+ output = (
273
+ output.transpose(2, 3).contiguous().view(b, d, t_t)
274
+ ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
275
+ return output, p_attn
276
+
277
+ def _matmul_with_relative_values(self, x, y):
278
+ """
279
+ x: [b, h, l, m]
280
+ y: [h or 1, m, d]
281
+ ret: [b, h, l, d]
282
+ """
283
+ ret = torch.matmul(x, y.unsqueeze(0))
284
+ return ret
285
+
286
+ def _matmul_with_relative_keys(self, x, y):
287
+ """
288
+ x: [b, h, l, d]
289
+ y: [h or 1, m, d]
290
+ ret: [b, h, l, m]
291
+ """
292
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
293
+ return ret
294
+
295
+ def _get_relative_embeddings(self, relative_embeddings, length):
296
+ max_relative_position = 2 * self.window_size + 1
297
+ # Pad first before slice to avoid using cond ops.
298
+ pad_length = max(length - (self.window_size + 1), 0)
299
+ slice_start_position = max((self.window_size + 1) - length, 0)
300
+ slice_end_position = slice_start_position + 2 * length - 1
301
+ if pad_length > 0:
302
+ padded_relative_embeddings = F.pad(
303
+ relative_embeddings,
304
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
305
+ )
306
+ else:
307
+ padded_relative_embeddings = relative_embeddings
308
+ used_relative_embeddings = padded_relative_embeddings[
309
+ :, slice_start_position:slice_end_position
310
+ ]
311
+ return used_relative_embeddings
312
+
313
+ def _relative_position_to_absolute_position(self, x):
314
+ """
315
+ x: [b, h, l, 2*l-1]
316
+ ret: [b, h, l, l]
317
+ """
318
+ batch, heads, length, _ = x.size()
319
+ # Concat columns of pad to shift from relative to absolute indexing.
320
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
321
+
322
+ # Concat extra elements so to add up to shape (len+1, 2*len-1).
323
+ x_flat = x.view([batch, heads, length * 2 * length])
324
+ x_flat = F.pad(
325
+ x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
326
+ )
327
+
328
+ # Reshape and slice out the padded elements.
329
+ x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
330
+ :, :, :length, length - 1 :
331
+ ]
332
+ return x_final
333
+
334
+ def _absolute_position_to_relative_position(self, x):
335
+ """
336
+ x: [b, h, l, l]
337
+ ret: [b, h, l, 2*l-1]
338
+ """
339
+ batch, heads, length, _ = x.size()
340
+ # padd along column
341
+ x = F.pad(
342
+ x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
343
+ )
344
+ x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
345
+ # add 0's in the beginning that will skew the elements after reshape
346
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
347
+ x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
348
+ return x_final
349
+
350
+ def _attention_bias_proximal(self, length):
351
+ """Bias for self-attention to encourage attention to close positions.
352
+ Args:
353
+ length: an integer scalar.
354
+ Returns:
355
+ a Tensor with shape [1, 1, length, length]
356
+ """
357
+ r = torch.arange(length, dtype=torch.float32)
358
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
359
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
360
+
361
+
362
+ class FFN(nn.Module):
363
+ def __init__(
364
+ self,
365
+ in_channels,
366
+ out_channels,
367
+ filter_channels,
368
+ kernel_size,
369
+ p_dropout=0.0,
370
+ activation=None,
371
+ causal=False,
372
+ ):
373
+ super().__init__()
374
+ self.in_channels = in_channels
375
+ self.out_channels = out_channels
376
+ self.filter_channels = filter_channels
377
+ self.kernel_size = kernel_size
378
+ self.p_dropout = p_dropout
379
+ self.activation = activation
380
+ self.causal = causal
381
+
382
+ if causal:
383
+ self.padding = self._causal_padding
384
+ else:
385
+ self.padding = self._same_padding
386
+
387
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
388
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
389
+ self.drop = nn.Dropout(p_dropout)
390
+
391
+ def forward(self, x, x_mask):
392
+ x = self.conv_1(self.padding(x * x_mask))
393
+ if self.activation == "gelu":
394
+ x = x * torch.sigmoid(1.702 * x)
395
+ else:
396
+ x = torch.relu(x)
397
+ x = self.drop(x)
398
+ x = self.conv_2(self.padding(x * x_mask))
399
+ return x * x_mask
400
+
401
+ def _causal_padding(self, x):
402
+ if self.kernel_size == 1:
403
+ return x
404
+ pad_l = self.kernel_size - 1
405
+ pad_r = 0
406
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
407
+ x = F.pad(x, commons.convert_pad_shape(padding))
408
+ return x
409
+
410
+ def _same_padding(self, x):
411
+ if self.kernel_size == 1:
412
+ return x
413
+ pad_l = (self.kernel_size - 1) // 2
414
+ pad_r = self.kernel_size // 2
415
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
416
+ x = F.pad(x, commons.convert_pad_shape(padding))
417
+ return x
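As a quick sanity check, a sketch of driving the relative-position Encoder above with dummy tensors (all sizes here are illustrative, not values used by this Space):

    import torch
    from lib.infer_pack.attentions import Encoder

    enc = Encoder(hidden_channels=192, filter_channels=768,
                  n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.0)
    x = torch.randn(2, 192, 100)     # [batch, hidden_channels, time]
    x_mask = torch.ones(2, 1, 100)   # 1.0 marks valid frames
    y = enc(x, x_mask)               # -> torch.Size([2, 192, 100])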
Bocchi-the-Rock/lib/infer_pack/commons.py ADDED
@@ -0,0 +1,166 @@
1
+ import math
2
+ import numpy as np
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+
8
+ def init_weights(m, mean=0.0, std=0.01):
9
+ classname = m.__class__.__name__
10
+ if classname.find("Conv") != -1:
11
+ m.weight.data.normal_(mean, std)
12
+
13
+
14
+ def get_padding(kernel_size, dilation=1):
15
+ return int((kernel_size * dilation - dilation) / 2)
16
+
17
+
18
+ def convert_pad_shape(pad_shape):
19
+ l = pad_shape[::-1]
20
+ pad_shape = [item for sublist in l for item in sublist]
21
+ return pad_shape
22
+
23
+
24
+ def kl_divergence(m_p, logs_p, m_q, logs_q):
25
+ """KL(P||Q)"""
26
+ kl = (logs_q - logs_p) - 0.5
27
+ kl += (
28
+ 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
29
+ )
30
+ return kl
31
+
32
+
33
+ def rand_gumbel(shape):
34
+ """Sample from the Gumbel distribution, protect from overflows."""
35
+ uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
36
+ return -torch.log(-torch.log(uniform_samples))
37
+
38
+
39
+ def rand_gumbel_like(x):
40
+ g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
41
+ return g
42
+
43
+
44
+ def slice_segments(x, ids_str, segment_size=4):
45
+ ret = torch.zeros_like(x[:, :, :segment_size])
46
+ for i in range(x.size(0)):
47
+ idx_str = ids_str[i]
48
+ idx_end = idx_str + segment_size
49
+ ret[i] = x[i, :, idx_str:idx_end]
50
+ return ret
51
+
52
+
53
+ def slice_segments2(x, ids_str, segment_size=4):
54
+ ret = torch.zeros_like(x[:, :segment_size])
55
+ for i in range(x.size(0)):
56
+ idx_str = ids_str[i]
57
+ idx_end = idx_str + segment_size
58
+ ret[i] = x[i, idx_str:idx_end]
59
+ return ret
60
+
61
+
62
+ def rand_slice_segments(x, x_lengths=None, segment_size=4):
63
+ b, d, t = x.size()
64
+ if x_lengths is None:
65
+ x_lengths = t
66
+ ids_str_max = x_lengths - segment_size + 1
67
+ ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
68
+ ret = slice_segments(x, ids_str, segment_size)
69
+ return ret, ids_str
70
+
71
+
72
+ def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
73
+ position = torch.arange(length, dtype=torch.float)
74
+ num_timescales = channels // 2
75
+ log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
76
+ num_timescales - 1
77
+ )
78
+ inv_timescales = min_timescale * torch.exp(
79
+ torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
80
+ )
81
+ scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
82
+ signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
83
+ signal = F.pad(signal, [0, 0, 0, channels % 2])
84
+ signal = signal.view(1, channels, length)
85
+ return signal
86
+
87
+
88
+ def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
89
+ b, channels, length = x.size()
90
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
91
+ return x + signal.to(dtype=x.dtype, device=x.device)
92
+
93
+
94
+ def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
95
+ b, channels, length = x.size()
96
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
97
+ return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
98
+
99
+
100
+ def subsequent_mask(length):
101
+ mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
102
+ return mask
103
+
104
+
105
+ @torch.jit.script
106
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
107
+ n_channels_int = n_channels[0]
108
+ in_act = input_a + input_b
109
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
110
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
111
+ acts = t_act * s_act
112
+ return acts
113
+
114
+
115
+ def convert_pad_shape(pad_shape):
116
+ l = pad_shape[::-1]
117
+ pad_shape = [item for sublist in l for item in sublist]
118
+ return pad_shape
119
+
120
+
121
+ def shift_1d(x):
122
+ x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
123
+ return x
124
+
125
+
126
+ def sequence_mask(length, max_length=None):
127
+ if max_length is None:
128
+ max_length = length.max()
129
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
130
+ return x.unsqueeze(0) < length.unsqueeze(1)
131
+
132
+
133
+ def generate_path(duration, mask):
134
+ """
135
+ duration: [b, 1, t_x]
136
+ mask: [b, 1, t_y, t_x]
137
+ """
138
+ device = duration.device
139
+
140
+ b, _, t_y, t_x = mask.shape
141
+ cum_duration = torch.cumsum(duration, -1)
142
+
143
+ cum_duration_flat = cum_duration.view(b * t_x)
144
+ path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
145
+ path = path.view(b, t_x, t_y)
146
+ path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
147
+ path = path.unsqueeze(1).transpose(2, 3) * mask
148
+ return path
149
+
150
+
151
+ def clip_grad_value_(parameters, clip_value, norm_type=2):
152
+ if isinstance(parameters, torch.Tensor):
153
+ parameters = [parameters]
154
+ parameters = list(filter(lambda p: p.grad is not None, parameters))
155
+ norm_type = float(norm_type)
156
+ if clip_value is not None:
157
+ clip_value = float(clip_value)
158
+
159
+ total_norm = 0
160
+ for p in parameters:
161
+ param_norm = p.grad.data.norm(norm_type)
162
+ total_norm += param_norm.item() ** norm_type
163
+ if clip_value is not None:
164
+ p.grad.data.clamp_(min=-clip_value, max=clip_value)
165
+ total_norm = total_norm ** (1.0 / norm_type)
166
+ return total_norm
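A small demo (illustrative values) of two helpers defined above, sequence_mask and rand_slice_segments, which the models below use for masking padded frames and drawing random training slices:

    import torch
    from lib.infer_pack.commons import sequence_mask, rand_slice_segments

    lengths = torch.tensor([5, 8])
    print(sequence_mask(lengths))    # [2, 8] bool mask, True = valid frame
    x = torch.randn(2, 192, 8)       # [batch, channels, time]
    seg, ids = rand_slice_segments(x, lengths, segment_size=4)
    print(seg.shape, ids)            # torch.Size([2, 192, 4]) plus the start indices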
Bocchi-the-Rock/lib/infer_pack/models.py ADDED
@@ -0,0 +1,1142 @@
1
+ import math, pdb, os
2
+ from time import time as ttime
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+ from lib.infer_pack import modules
7
+ from lib.infer_pack import attentions
8
+ from lib.infer_pack import commons
9
+ from lib.infer_pack.commons import init_weights, get_padding
10
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
12
+ from lib.infer_pack.commons import init_weights
13
+ import numpy as np
14
+ from lib.infer_pack import commons
15
+
16
+
17
+ class TextEncoder256(nn.Module):
18
+ def __init__(
19
+ self,
20
+ out_channels,
21
+ hidden_channels,
22
+ filter_channels,
23
+ n_heads,
24
+ n_layers,
25
+ kernel_size,
26
+ p_dropout,
27
+ f0=True,
28
+ ):
29
+ super().__init__()
30
+ self.out_channels = out_channels
31
+ self.hidden_channels = hidden_channels
32
+ self.filter_channels = filter_channels
33
+ self.n_heads = n_heads
34
+ self.n_layers = n_layers
35
+ self.kernel_size = kernel_size
36
+ self.p_dropout = p_dropout
37
+ self.emb_phone = nn.Linear(256, hidden_channels)
38
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
+ if f0:
40
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
+ self.encoder = attentions.Encoder(
42
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
+ )
44
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
+
46
+ def forward(self, phone, pitch, lengths):
47
+ if pitch is None:
48
+ x = self.emb_phone(phone)
49
+ else:
50
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
+ x = self.lrelu(x)
53
+ x = torch.transpose(x, 1, -1) # [b, h, t]
54
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
+ x.dtype
56
+ )
57
+ x = self.encoder(x * x_mask, x_mask)
58
+ stats = self.proj(x) * x_mask
59
+
60
+ m, logs = torch.split(stats, self.out_channels, dim=1)
61
+ return m, logs, x_mask
62
+
63
+
64
+ class TextEncoder768(nn.Module):
65
+ def __init__(
66
+ self,
67
+ out_channels,
68
+ hidden_channels,
69
+ filter_channels,
70
+ n_heads,
71
+ n_layers,
72
+ kernel_size,
73
+ p_dropout,
74
+ f0=True,
75
+ ):
76
+ super().__init__()
77
+ self.out_channels = out_channels
78
+ self.hidden_channels = hidden_channels
79
+ self.filter_channels = filter_channels
80
+ self.n_heads = n_heads
81
+ self.n_layers = n_layers
82
+ self.kernel_size = kernel_size
83
+ self.p_dropout = p_dropout
84
+ self.emb_phone = nn.Linear(768, hidden_channels)
85
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
+ if f0:
87
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
+ self.encoder = attentions.Encoder(
89
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
+ )
91
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
92
+
93
+ def forward(self, phone, pitch, lengths):
94
+ if pitch is None:
95
+ x = self.emb_phone(phone)
96
+ else:
97
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
+ x = self.lrelu(x)
100
+ x = torch.transpose(x, 1, -1) # [b, h, t]
101
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
+ x.dtype
103
+ )
104
+ x = self.encoder(x * x_mask, x_mask)
105
+ stats = self.proj(x) * x_mask
106
+
107
+ m, logs = torch.split(stats, self.out_channels, dim=1)
108
+ return m, logs, x_mask
109
+
110
+
111
+ class ResidualCouplingBlock(nn.Module):
112
+ def __init__(
113
+ self,
114
+ channels,
115
+ hidden_channels,
116
+ kernel_size,
117
+ dilation_rate,
118
+ n_layers,
119
+ n_flows=4,
120
+ gin_channels=0,
121
+ ):
122
+ super().__init__()
123
+ self.channels = channels
124
+ self.hidden_channels = hidden_channels
125
+ self.kernel_size = kernel_size
126
+ self.dilation_rate = dilation_rate
127
+ self.n_layers = n_layers
128
+ self.n_flows = n_flows
129
+ self.gin_channels = gin_channels
130
+
131
+ self.flows = nn.ModuleList()
132
+ for i in range(n_flows):
133
+ self.flows.append(
134
+ modules.ResidualCouplingLayer(
135
+ channels,
136
+ hidden_channels,
137
+ kernel_size,
138
+ dilation_rate,
139
+ n_layers,
140
+ gin_channels=gin_channels,
141
+ mean_only=True,
142
+ )
143
+ )
144
+ self.flows.append(modules.Flip())
145
+
146
+ def forward(self, x, x_mask, g=None, reverse=False):
147
+ if not reverse:
148
+ for flow in self.flows:
149
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
150
+ else:
151
+ for flow in reversed(self.flows):
152
+ x = flow(x, x_mask, g=g, reverse=reverse)
153
+ return x
154
+
155
+ def remove_weight_norm(self):
156
+ for i in range(self.n_flows):
157
+ self.flows[i * 2].remove_weight_norm()
158
+
159
+
160
+ class PosteriorEncoder(nn.Module):
161
+ def __init__(
162
+ self,
163
+ in_channels,
164
+ out_channels,
165
+ hidden_channels,
166
+ kernel_size,
167
+ dilation_rate,
168
+ n_layers,
169
+ gin_channels=0,
170
+ ):
171
+ super().__init__()
172
+ self.in_channels = in_channels
173
+ self.out_channels = out_channels
174
+ self.hidden_channels = hidden_channels
175
+ self.kernel_size = kernel_size
176
+ self.dilation_rate = dilation_rate
177
+ self.n_layers = n_layers
178
+ self.gin_channels = gin_channels
179
+
180
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
181
+ self.enc = modules.WN(
182
+ hidden_channels,
183
+ kernel_size,
184
+ dilation_rate,
185
+ n_layers,
186
+ gin_channels=gin_channels,
187
+ )
188
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
189
+
190
+ def forward(self, x, x_lengths, g=None):
191
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
192
+ x.dtype
193
+ )
194
+ x = self.pre(x) * x_mask
195
+ x = self.enc(x, x_mask, g=g)
196
+ stats = self.proj(x) * x_mask
197
+ m, logs = torch.split(stats, self.out_channels, dim=1)
198
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
199
+ return z, m, logs, x_mask
200
+
201
+ def remove_weight_norm(self):
202
+ self.enc.remove_weight_norm()
203
+
204
+
205
+ class Generator(torch.nn.Module):
206
+ def __init__(
207
+ self,
208
+ initial_channel,
209
+ resblock,
210
+ resblock_kernel_sizes,
211
+ resblock_dilation_sizes,
212
+ upsample_rates,
213
+ upsample_initial_channel,
214
+ upsample_kernel_sizes,
215
+ gin_channels=0,
216
+ ):
217
+ super(Generator, self).__init__()
218
+ self.num_kernels = len(resblock_kernel_sizes)
219
+ self.num_upsamples = len(upsample_rates)
220
+ self.conv_pre = Conv1d(
221
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
222
+ )
223
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
224
+
225
+ self.ups = nn.ModuleList()
226
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
227
+ self.ups.append(
228
+ weight_norm(
229
+ ConvTranspose1d(
230
+ upsample_initial_channel // (2**i),
231
+ upsample_initial_channel // (2 ** (i + 1)),
232
+ k,
233
+ u,
234
+ padding=(k - u) // 2,
235
+ )
236
+ )
237
+ )
238
+
239
+ self.resblocks = nn.ModuleList()
240
+ for i in range(len(self.ups)):
241
+ ch = upsample_initial_channel // (2 ** (i + 1))
242
+ for j, (k, d) in enumerate(
243
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
244
+ ):
245
+ self.resblocks.append(resblock(ch, k, d))
246
+
247
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
248
+ self.ups.apply(init_weights)
249
+
250
+ if gin_channels != 0:
251
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
252
+
253
+ def forward(self, x, g=None):
254
+ x = self.conv_pre(x)
255
+ if g is not None:
256
+ x = x + self.cond(g)
257
+
258
+ for i in range(self.num_upsamples):
259
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
260
+ x = self.ups[i](x)
261
+ xs = None
262
+ for j in range(self.num_kernels):
263
+ if xs is None:
264
+ xs = self.resblocks[i * self.num_kernels + j](x)
265
+ else:
266
+ xs += self.resblocks[i * self.num_kernels + j](x)
267
+ x = xs / self.num_kernels
268
+ x = F.leaky_relu(x)
269
+ x = self.conv_post(x)
270
+ x = torch.tanh(x)
271
+
272
+ return x
273
+
274
+ def remove_weight_norm(self):
275
+ for l in self.ups:
276
+ remove_weight_norm(l)
277
+ for l in self.resblocks:
278
+ l.remove_weight_norm()
279
+
280
+
281
+ class SineGen(torch.nn.Module):
282
+ """Definition of sine generator
283
+ SineGen(samp_rate, harmonic_num = 0,
284
+ sine_amp = 0.1, noise_std = 0.003,
285
+ voiced_threshold = 0,
286
+ flag_for_pulse=False)
287
+ samp_rate: sampling rate in Hz
288
+ harmonic_num: number of harmonic overtones (default 0)
289
+ sine_amp: amplitude of the sine waveform (default 0.1)
290
+ noise_std: std of Gaussian noise (default 0.003)
291
+ voiced_threshold: F0 threshold for U/V classification (default 0)
292
+ flag_for_pulse: set True when this SineGen is used inside PulseGen (default False)
293
+ Note: when flag_for_pulse is True, the first time step of a voiced
294
+ segment is always sin(np.pi) or cos(0)
295
+ """
296
+
297
+ def __init__(
298
+ self,
299
+ samp_rate,
300
+ harmonic_num=0,
301
+ sine_amp=0.1,
302
+ noise_std=0.003,
303
+ voiced_threshold=0,
304
+ flag_for_pulse=False,
305
+ ):
306
+ super(SineGen, self).__init__()
307
+ self.sine_amp = sine_amp
308
+ self.noise_std = noise_std
309
+ self.harmonic_num = harmonic_num
310
+ self.dim = self.harmonic_num + 1
311
+ self.sampling_rate = samp_rate
312
+ self.voiced_threshold = voiced_threshold
313
+
314
+ def _f02uv(self, f0):
315
+ # generate uv signal
316
+ uv = torch.ones_like(f0)
317
+ uv = uv * (f0 > self.voiced_threshold)
318
+ return uv
319
+
320
+ def forward(self, f0, upp):
321
+ """sine_tensor, uv = forward(f0)
322
+ input F0: tensor(batchsize=1, length, dim=1)
323
+ f0 for unvoiced steps should be 0
324
+ output sine_tensor: tensor(batchsize=1, length, dim)
325
+ output uv: tensor(batchsize=1, length, 1)
326
+ """
327
+ with torch.no_grad():
328
+ f0 = f0[:, None].transpose(1, 2)
329
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
330
+ # fundamental component
331
+ f0_buf[:, :, 0] = f0[:, :, 0]
332
+ for idx in np.arange(self.harmonic_num):
333
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
334
+ idx + 2
335
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
336
+ rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the harmonic products cannot be optimized away afterwards
337
+ rand_ini = torch.rand(
338
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
339
+ )
340
+ rand_ini[:, 0] = 0
341
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
342
+ tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # applying % 1 here would prevent optimizing the cumsum below
343
+ tmp_over_one *= upp
344
+ tmp_over_one = F.interpolate(
345
+ tmp_over_one.transpose(2, 1),
346
+ scale_factor=upp,
347
+ mode="linear",
348
+ align_corners=True,
349
+ ).transpose(2, 1)
350
+ rad_values = F.interpolate(
351
+ rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
352
+ ).transpose(
353
+ 2, 1
354
+ ) #######
355
+ tmp_over_one %= 1
356
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
357
+ cumsum_shift = torch.zeros_like(rad_values)
358
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
359
+ sine_waves = torch.sin(
360
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
361
+ )
362
+ sine_waves = sine_waves * self.sine_amp
363
+ uv = self._f02uv(f0)
364
+ uv = F.interpolate(
365
+ uv.transpose(2, 1), scale_factor=upp, mode="nearest"
366
+ ).transpose(2, 1)
367
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
368
+ noise = noise_amp * torch.randn_like(sine_waves)
369
+ sine_waves = sine_waves * uv + noise
370
+ return sine_waves, uv, noise
371
+
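To make the SineGen docstring above concrete, a hedged sketch of calling the generator directly. The F0 values and the upsampling factor upp are illustrative; inside GeneratorNSF below, upp is the product of the upsample rates:

    import torch
    from lib.infer_pack.models import SineGen

    gen = SineGen(samp_rate=40000, harmonic_num=0)
    f0 = torch.full((1, 100), 220.0)    # 100 frames at 220 Hz
    f0[:, :10] = 0.0                    # zeros mark unvoiced frames
    sine, uv, noise = gen(f0, upp=400)  # sine: [1, 100*400, 1] waveform at 40 kHz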
372
+
373
+ class SourceModuleHnNSF(torch.nn.Module):
374
+ """SourceModule for hn-nsf
375
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
376
+ add_noise_std=0.003, voiced_threshod=0)
377
+ sampling_rate: sampling_rate in Hz
378
+ harmonic_num: number of harmonic above F0 (default: 0)
379
+ sine_amp: amplitude of sine source signal (default: 0.1)
380
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
381
+ note that amplitude of noise in unvoiced is decided
382
+ by sine_amp
383
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
384
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
385
+ F0_sampled (batchsize, length, 1)
386
+ Sine_source (batchsize, length, 1)
387
+ noise_source (batchsize, length, 1)
388
+ uv (batchsize, length, 1)
389
+ """
390
+
391
+ def __init__(
392
+ self,
393
+ sampling_rate,
394
+ harmonic_num=0,
395
+ sine_amp=0.1,
396
+ add_noise_std=0.003,
397
+ voiced_threshod=0,
398
+ is_half=True,
399
+ ):
400
+ super(SourceModuleHnNSF, self).__init__()
401
+
402
+ self.sine_amp = sine_amp
403
+ self.noise_std = add_noise_std
404
+ self.is_half = is_half
405
+ # to produce sine waveforms
406
+ self.l_sin_gen = SineGen(
407
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
408
+ )
409
+
410
+ # to merge source harmonics into a single excitation
411
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
412
+ self.l_tanh = torch.nn.Tanh()
413
+
414
+ def forward(self, x, upp=None):
415
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
416
+ if self.is_half:
417
+ sine_wavs = sine_wavs.half()
418
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
419
+ return sine_merge, None, None # noise, uv
420
+
421
+
422
+ class GeneratorNSF(torch.nn.Module):
423
+ def __init__(
424
+ self,
425
+ initial_channel,
426
+ resblock,
427
+ resblock_kernel_sizes,
428
+ resblock_dilation_sizes,
429
+ upsample_rates,
430
+ upsample_initial_channel,
431
+ upsample_kernel_sizes,
432
+ gin_channels,
433
+ sr,
434
+ is_half=False,
435
+ ):
436
+ super(GeneratorNSF, self).__init__()
437
+ self.num_kernels = len(resblock_kernel_sizes)
438
+ self.num_upsamples = len(upsample_rates)
439
+
440
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
441
+ self.m_source = SourceModuleHnNSF(
442
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
443
+ )
444
+ self.noise_convs = nn.ModuleList()
445
+ self.conv_pre = Conv1d(
446
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
447
+ )
448
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
449
+
450
+ self.ups = nn.ModuleList()
451
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
452
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
453
+ self.ups.append(
454
+ weight_norm(
455
+ ConvTranspose1d(
456
+ upsample_initial_channel // (2**i),
457
+ upsample_initial_channel // (2 ** (i + 1)),
458
+ k,
459
+ u,
460
+ padding=(k - u) // 2,
461
+ )
462
+ )
463
+ )
464
+ if i + 1 < len(upsample_rates):
465
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
466
+ self.noise_convs.append(
467
+ Conv1d(
468
+ 1,
469
+ c_cur,
470
+ kernel_size=stride_f0 * 2,
471
+ stride=stride_f0,
472
+ padding=stride_f0 // 2,
473
+ )
474
+ )
475
+ else:
476
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
477
+
478
+ self.resblocks = nn.ModuleList()
479
+ for i in range(len(self.ups)):
480
+ ch = upsample_initial_channel // (2 ** (i + 1))
481
+ for j, (k, d) in enumerate(
482
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
483
+ ):
484
+ self.resblocks.append(resblock(ch, k, d))
485
+
486
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
487
+ self.ups.apply(init_weights)
488
+
489
+ if gin_channels != 0:
490
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
491
+
492
+ self.upp = np.prod(upsample_rates)
493
+
494
+ def forward(self, x, f0, g=None):
495
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
496
+ har_source = har_source.transpose(1, 2)
497
+ x = self.conv_pre(x)
498
+ if g is not None:
499
+ x = x + self.cond(g)
500
+
501
+ for i in range(self.num_upsamples):
502
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
503
+ x = self.ups[i](x)
504
+ x_source = self.noise_convs[i](har_source)
505
+ x = x + x_source
506
+ xs = None
507
+ for j in range(self.num_kernels):
508
+ if xs is None:
509
+ xs = self.resblocks[i * self.num_kernels + j](x)
510
+ else:
511
+ xs += self.resblocks[i * self.num_kernels + j](x)
512
+ x = xs / self.num_kernels
513
+ x = F.leaky_relu(x)
514
+ x = self.conv_post(x)
515
+ x = torch.tanh(x)
516
+ return x
517
+
518
+ def remove_weight_norm(self):
519
+ for l in self.ups:
520
+ remove_weight_norm(l)
521
+ for l in self.resblocks:
522
+ l.remove_weight_norm()
523
+
524
+
525
+ sr2sr = {
526
+ "32k": 32000,
527
+ "40k": 40000,
528
+ "48k": 48000,
529
+ }
530
+
531
+
532
+ class SynthesizerTrnMs256NSFsid(nn.Module):
533
+ def __init__(
534
+ self,
535
+ spec_channels,
536
+ segment_size,
537
+ inter_channels,
538
+ hidden_channels,
539
+ filter_channels,
540
+ n_heads,
541
+ n_layers,
542
+ kernel_size,
543
+ p_dropout,
544
+ resblock,
545
+ resblock_kernel_sizes,
546
+ resblock_dilation_sizes,
547
+ upsample_rates,
548
+ upsample_initial_channel,
549
+ upsample_kernel_sizes,
550
+ spk_embed_dim,
551
+ gin_channels,
552
+ sr,
553
+ **kwargs
554
+ ):
555
+ super().__init__()
556
+ if type(sr) == type("strr"):
557
+ sr = sr2sr[sr]
558
+ self.spec_channels = spec_channels
559
+ self.inter_channels = inter_channels
560
+ self.hidden_channels = hidden_channels
561
+ self.filter_channels = filter_channels
562
+ self.n_heads = n_heads
563
+ self.n_layers = n_layers
564
+ self.kernel_size = kernel_size
565
+ self.p_dropout = p_dropout
566
+ self.resblock = resblock
567
+ self.resblock_kernel_sizes = resblock_kernel_sizes
568
+ self.resblock_dilation_sizes = resblock_dilation_sizes
569
+ self.upsample_rates = upsample_rates
570
+ self.upsample_initial_channel = upsample_initial_channel
571
+ self.upsample_kernel_sizes = upsample_kernel_sizes
572
+ self.segment_size = segment_size
573
+ self.gin_channels = gin_channels
574
+ # self.hop_length = hop_length#
575
+ self.spk_embed_dim = spk_embed_dim
576
+ self.enc_p = TextEncoder256(
577
+ inter_channels,
578
+ hidden_channels,
579
+ filter_channels,
580
+ n_heads,
581
+ n_layers,
582
+ kernel_size,
583
+ p_dropout,
584
+ )
585
+ self.dec = GeneratorNSF(
586
+ inter_channels,
587
+ resblock,
588
+ resblock_kernel_sizes,
589
+ resblock_dilation_sizes,
590
+ upsample_rates,
591
+ upsample_initial_channel,
592
+ upsample_kernel_sizes,
593
+ gin_channels=gin_channels,
594
+ sr=sr,
595
+ is_half=kwargs["is_half"],
596
+ )
597
+ self.enc_q = PosteriorEncoder(
598
+ spec_channels,
599
+ inter_channels,
600
+ hidden_channels,
601
+ 5,
602
+ 1,
603
+ 16,
604
+ gin_channels=gin_channels,
605
+ )
606
+ self.flow = ResidualCouplingBlock(
607
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
608
+ )
609
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
610
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
611
+
612
+ def remove_weight_norm(self):
613
+ self.dec.remove_weight_norm()
614
+ self.flow.remove_weight_norm()
615
+ self.enc_q.remove_weight_norm()
616
+
617
+ def forward(
618
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
619
+ ):  # ds is the speaker id, shape [bs, 1]
620
+ # print(1,pitch.shape)#[bs,t]
621
+ g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
622
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
623
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
624
+ z_p = self.flow(z, y_mask, g=g)
625
+ z_slice, ids_slice = commons.rand_slice_segments(
626
+ z, y_lengths, self.segment_size
627
+ )
628
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
629
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
630
+ # print(-2,pitchf.shape,z_slice.shape)
631
+ o = self.dec(z_slice, pitchf, g=g)
632
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
633
+
634
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
635
+ g = self.emb_g(sid).unsqueeze(-1)
636
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
637
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
638
+ if rate:
639
+ head = int(z_p.shape[2] * rate)
640
+ z_p = z_p[:, :, -head:]
641
+ x_mask = x_mask[:, :, -head:]
642
+ nsff0 = nsff0[:, -head:]
643
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
644
+ o = self.dec(z * x_mask, nsff0, g=g)
645
+ return o, x_mask, (z, z_p, m_p, logs_p)
646
+
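# --- editor's sketch (not part of the original commit): a worked example of
# the `rate` trimming in infer() above. With rate=0.5, only the trailing half
# of the latent frames (and the matching tail of the F0 contour) is decoded.
import torch

z_p = torch.randn(1, 192, 100)     # [batch, inter_channels, frames] (assumed sizes)
rate = 0.5
head = int(z_p.shape[2] * rate)    # 50
z_p = z_p[:, :, -head:]            # keep only the last 50 frames
assert z_p.shape == (1, 192, 50)
# --- end sketch ---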
647
+
648
+ class SynthesizerTrnMs768NSFsid(nn.Module):
649
+ def __init__(
650
+ self,
651
+ spec_channels,
652
+ segment_size,
653
+ inter_channels,
654
+ hidden_channels,
655
+ filter_channels,
656
+ n_heads,
657
+ n_layers,
658
+ kernel_size,
659
+ p_dropout,
660
+ resblock,
661
+ resblock_kernel_sizes,
662
+ resblock_dilation_sizes,
663
+ upsample_rates,
664
+ upsample_initial_channel,
665
+ upsample_kernel_sizes,
666
+ spk_embed_dim,
667
+ gin_channels,
668
+ sr,
669
+ **kwargs
670
+ ):
671
+ super().__init__()
672
+ if isinstance(sr, str):
673
+ sr = sr2sr[sr]
674
+ self.spec_channels = spec_channels
675
+ self.inter_channels = inter_channels
676
+ self.hidden_channels = hidden_channels
677
+ self.filter_channels = filter_channels
678
+ self.n_heads = n_heads
679
+ self.n_layers = n_layers
680
+ self.kernel_size = kernel_size
681
+ self.p_dropout = p_dropout
682
+ self.resblock = resblock
683
+ self.resblock_kernel_sizes = resblock_kernel_sizes
684
+ self.resblock_dilation_sizes = resblock_dilation_sizes
685
+ self.upsample_rates = upsample_rates
686
+ self.upsample_initial_channel = upsample_initial_channel
687
+ self.upsample_kernel_sizes = upsample_kernel_sizes
688
+ self.segment_size = segment_size
689
+ self.gin_channels = gin_channels
690
+ # self.hop_length = hop_length#
691
+ self.spk_embed_dim = spk_embed_dim
692
+ self.enc_p = TextEncoder768(
693
+ inter_channels,
694
+ hidden_channels,
695
+ filter_channels,
696
+ n_heads,
697
+ n_layers,
698
+ kernel_size,
699
+ p_dropout,
700
+ )
701
+ self.dec = GeneratorNSF(
702
+ inter_channels,
703
+ resblock,
704
+ resblock_kernel_sizes,
705
+ resblock_dilation_sizes,
706
+ upsample_rates,
707
+ upsample_initial_channel,
708
+ upsample_kernel_sizes,
709
+ gin_channels=gin_channels,
710
+ sr=sr,
711
+ is_half=kwargs["is_half"],
712
+ )
713
+ self.enc_q = PosteriorEncoder(
714
+ spec_channels,
715
+ inter_channels,
716
+ hidden_channels,
717
+ 5,
718
+ 1,
719
+ 16,
720
+ gin_channels=gin_channels,
721
+ )
722
+ self.flow = ResidualCouplingBlock(
723
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
724
+ )
725
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
726
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
727
+
728
+ def remove_weight_norm(self):
729
+ self.dec.remove_weight_norm()
730
+ self.flow.remove_weight_norm()
731
+ self.enc_q.remove_weight_norm()
732
+
733
+ def forward(
734
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
735
+ ): # ds is the speaker id, shape [bs, 1]
736
+ # print(1,pitch.shape)#[bs,t]
737
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis, broadcast
738
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
739
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
740
+ z_p = self.flow(z, y_mask, g=g)
741
+ z_slice, ids_slice = commons.rand_slice_segments(
742
+ z, y_lengths, self.segment_size
743
+ )
744
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
745
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
746
+ # print(-2,pitchf.shape,z_slice.shape)
747
+ o = self.dec(z_slice, pitchf, g=g)
748
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
749
+
750
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
751
+ g = self.emb_g(sid).unsqueeze(-1)
752
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
753
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
754
+ if rate:
755
+ head = int(z_p.shape[2] * rate)
756
+ z_p = z_p[:, :, -head:]
757
+ x_mask = x_mask[:, :, -head:]
758
+ nsff0 = nsff0[:, -head:]
759
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
760
+ o = self.dec(z * x_mask, nsff0, g=g)
761
+ return o, x_mask, (z, z_p, m_p, logs_p)
762
+
763
+
764
+ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
765
+ def __init__(
766
+ self,
767
+ spec_channels,
768
+ segment_size,
769
+ inter_channels,
770
+ hidden_channels,
771
+ filter_channels,
772
+ n_heads,
773
+ n_layers,
774
+ kernel_size,
775
+ p_dropout,
776
+ resblock,
777
+ resblock_kernel_sizes,
778
+ resblock_dilation_sizes,
779
+ upsample_rates,
780
+ upsample_initial_channel,
781
+ upsample_kernel_sizes,
782
+ spk_embed_dim,
783
+ gin_channels,
784
+ sr=None,
785
+ **kwargs
786
+ ):
787
+ super().__init__()
788
+ self.spec_channels = spec_channels
789
+ self.inter_channels = inter_channels
790
+ self.hidden_channels = hidden_channels
791
+ self.filter_channels = filter_channels
792
+ self.n_heads = n_heads
793
+ self.n_layers = n_layers
794
+ self.kernel_size = kernel_size
795
+ self.p_dropout = p_dropout
796
+ self.resblock = resblock
797
+ self.resblock_kernel_sizes = resblock_kernel_sizes
798
+ self.resblock_dilation_sizes = resblock_dilation_sizes
799
+ self.upsample_rates = upsample_rates
800
+ self.upsample_initial_channel = upsample_initial_channel
801
+ self.upsample_kernel_sizes = upsample_kernel_sizes
802
+ self.segment_size = segment_size
803
+ self.gin_channels = gin_channels
804
+ # self.hop_length = hop_length#
805
+ self.spk_embed_dim = spk_embed_dim
806
+ self.enc_p = TextEncoder256(
807
+ inter_channels,
808
+ hidden_channels,
809
+ filter_channels,
810
+ n_heads,
811
+ n_layers,
812
+ kernel_size,
813
+ p_dropout,
814
+ f0=False,
815
+ )
816
+ self.dec = Generator(
817
+ inter_channels,
818
+ resblock,
819
+ resblock_kernel_sizes,
820
+ resblock_dilation_sizes,
821
+ upsample_rates,
822
+ upsample_initial_channel,
823
+ upsample_kernel_sizes,
824
+ gin_channels=gin_channels,
825
+ )
826
+ self.enc_q = PosteriorEncoder(
827
+ spec_channels,
828
+ inter_channels,
829
+ hidden_channels,
830
+ 5,
831
+ 1,
832
+ 16,
833
+ gin_channels=gin_channels,
834
+ )
835
+ self.flow = ResidualCouplingBlock(
836
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
837
+ )
838
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
839
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
840
+
841
+ def remove_weight_norm(self):
842
+ self.dec.remove_weight_norm()
843
+ self.flow.remove_weight_norm()
844
+ self.enc_q.remove_weight_norm()
845
+
846
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, shape [bs, 1]
847
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis, broadcast
848
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
849
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
850
+ z_p = self.flow(z, y_mask, g=g)
851
+ z_slice, ids_slice = commons.rand_slice_segments(
852
+ z, y_lengths, self.segment_size
853
+ )
854
+ o = self.dec(z_slice, g=g)
855
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
856
+
857
+ def infer(self, phone, phone_lengths, sid, rate=None):
858
+ g = self.emb_g(sid).unsqueeze(-1)
859
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
860
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
861
+ if rate:
862
+ head = int(z_p.shape[2] * rate)
863
+ z_p = z_p[:, :, -head:]
864
+ x_mask = x_mask[:, :, -head:]
865
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
866
+ o = self.dec(z * x_mask, g=g)
867
+ return o, x_mask, (z, z_p, m_p, logs_p)
868
+
869
+
870
+ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
871
+ def __init__(
872
+ self,
873
+ spec_channels,
874
+ segment_size,
875
+ inter_channels,
876
+ hidden_channels,
877
+ filter_channels,
878
+ n_heads,
879
+ n_layers,
880
+ kernel_size,
881
+ p_dropout,
882
+ resblock,
883
+ resblock_kernel_sizes,
884
+ resblock_dilation_sizes,
885
+ upsample_rates,
886
+ upsample_initial_channel,
887
+ upsample_kernel_sizes,
888
+ spk_embed_dim,
889
+ gin_channels,
890
+ sr=None,
891
+ **kwargs
892
+ ):
893
+ super().__init__()
894
+ self.spec_channels = spec_channels
895
+ self.inter_channels = inter_channels
896
+ self.hidden_channels = hidden_channels
897
+ self.filter_channels = filter_channels
898
+ self.n_heads = n_heads
899
+ self.n_layers = n_layers
900
+ self.kernel_size = kernel_size
901
+ self.p_dropout = p_dropout
902
+ self.resblock = resblock
903
+ self.resblock_kernel_sizes = resblock_kernel_sizes
904
+ self.resblock_dilation_sizes = resblock_dilation_sizes
905
+ self.upsample_rates = upsample_rates
906
+ self.upsample_initial_channel = upsample_initial_channel
907
+ self.upsample_kernel_sizes = upsample_kernel_sizes
908
+ self.segment_size = segment_size
909
+ self.gin_channels = gin_channels
910
+ # self.hop_length = hop_length#
911
+ self.spk_embed_dim = spk_embed_dim
912
+ self.enc_p = TextEncoder768(
913
+ inter_channels,
914
+ hidden_channels,
915
+ filter_channels,
916
+ n_heads,
917
+ n_layers,
918
+ kernel_size,
919
+ p_dropout,
920
+ f0=False,
921
+ )
922
+ self.dec = Generator(
923
+ inter_channels,
924
+ resblock,
925
+ resblock_kernel_sizes,
926
+ resblock_dilation_sizes,
927
+ upsample_rates,
928
+ upsample_initial_channel,
929
+ upsample_kernel_sizes,
930
+ gin_channels=gin_channels,
931
+ )
932
+ self.enc_q = PosteriorEncoder(
933
+ spec_channels,
934
+ inter_channels,
935
+ hidden_channels,
936
+ 5,
937
+ 1,
938
+ 16,
939
+ gin_channels=gin_channels,
940
+ )
941
+ self.flow = ResidualCouplingBlock(
942
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
943
+ )
944
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
945
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
946
+
947
+ def remove_weight_norm(self):
948
+ self.dec.remove_weight_norm()
949
+ self.flow.remove_weight_norm()
950
+ self.enc_q.remove_weight_norm()
951
+
952
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, shape [bs, 1]
953
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis, broadcast
954
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
955
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
956
+ z_p = self.flow(z, y_mask, g=g)
957
+ z_slice, ids_slice = commons.rand_slice_segments(
958
+ z, y_lengths, self.segment_size
959
+ )
960
+ o = self.dec(z_slice, g=g)
961
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
962
+
963
+ def infer(self, phone, phone_lengths, sid, rate=None):
964
+ g = self.emb_g(sid).unsqueeze(-1)
965
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
966
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
967
+ if rate:
968
+ head = int(z_p.shape[2] * rate)
969
+ z_p = z_p[:, :, -head:]
970
+ x_mask = x_mask[:, :, -head:]
971
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
972
+ o = self.dec(z * x_mask, g=g)
973
+ return o, x_mask, (z, z_p, m_p, logs_p)
974
+
975
+
976
+ class MultiPeriodDiscriminator(torch.nn.Module):
977
+ def __init__(self, use_spectral_norm=False):
978
+ super(MultiPeriodDiscriminator, self).__init__()
979
+ periods = [2, 3, 5, 7, 11, 17]
980
+ # periods = [3, 5, 7, 11, 17, 23, 37]
981
+
982
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
983
+ discs = discs + [
984
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
985
+ ]
986
+ self.discriminators = nn.ModuleList(discs)
987
+
988
+ def forward(self, y, y_hat):
989
+ y_d_rs = [] #
990
+ y_d_gs = []
991
+ fmap_rs = []
992
+ fmap_gs = []
993
+ for i, d in enumerate(self.discriminators):
994
+ y_d_r, fmap_r = d(y)
995
+ y_d_g, fmap_g = d(y_hat)
996
+ # for j in range(len(fmap_r)):
997
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
998
+ y_d_rs.append(y_d_r)
999
+ y_d_gs.append(y_d_g)
1000
+ fmap_rs.append(fmap_r)
1001
+ fmap_gs.append(fmap_g)
1002
+
1003
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1004
+
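# --- editor's sketch (not part of the original commit): assumed usage of the
# multi-period discriminator above. One DiscriminatorS plus one DiscriminatorP
# per period each score both waveforms and return (logits, feature maps).
import torch

mpd = MultiPeriodDiscriminator()
y = torch.randn(2, 1, 8192)        # real waveforms [batch, 1, samples]
y_hat = torch.randn(2, 1, 8192)    # generated waveforms
y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(y, y_hat)
assert len(y_d_rs) == 1 + 6        # DiscriminatorS + six periodic discriminators
# --- end sketch ---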
1005
+
1006
+ class MultiPeriodDiscriminatorV2(torch.nn.Module):
1007
+ def __init__(self, use_spectral_norm=False):
1008
+ super(MultiPeriodDiscriminatorV2, self).__init__()
1009
+ # periods = [2, 3, 5, 7, 11, 17]
1010
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
1011
+
1012
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
1013
+ discs = discs + [
1014
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
1015
+ ]
1016
+ self.discriminators = nn.ModuleList(discs)
1017
+
1018
+ def forward(self, y, y_hat):
1019
+ y_d_rs = [] #
1020
+ y_d_gs = []
1021
+ fmap_rs = []
1022
+ fmap_gs = []
1023
+ for i, d in enumerate(self.discriminators):
1024
+ y_d_r, fmap_r = d(y)
1025
+ y_d_g, fmap_g = d(y_hat)
1026
+ # for j in range(len(fmap_r)):
1027
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
1028
+ y_d_rs.append(y_d_r)
1029
+ y_d_gs.append(y_d_g)
1030
+ fmap_rs.append(fmap_r)
1031
+ fmap_gs.append(fmap_g)
1032
+
1033
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1034
+
1035
+
1036
+ class DiscriminatorS(torch.nn.Module):
1037
+ def __init__(self, use_spectral_norm=False):
1038
+ super(DiscriminatorS, self).__init__()
1039
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
1040
+ self.convs = nn.ModuleList(
1041
+ [
1042
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
1043
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
1044
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
1045
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
1046
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
1047
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
1048
+ ]
1049
+ )
1050
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
1051
+
1052
+ def forward(self, x):
1053
+ fmap = []
1054
+
1055
+ for l in self.convs:
1056
+ x = l(x)
1057
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
1058
+ fmap.append(x)
1059
+ x = self.conv_post(x)
1060
+ fmap.append(x)
1061
+ x = torch.flatten(x, 1, -1)
1062
+
1063
+ return x, fmap
1064
+
1065
+
1066
+ class DiscriminatorP(torch.nn.Module):
1067
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
1068
+ super(DiscriminatorP, self).__init__()
1069
+ self.period = period
1070
+ self.use_spectral_norm = use_spectral_norm
1071
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
1072
+ self.convs = nn.ModuleList(
1073
+ [
1074
+ norm_f(
1075
+ Conv2d(
1076
+ 1,
1077
+ 32,
1078
+ (kernel_size, 1),
1079
+ (stride, 1),
1080
+ padding=(get_padding(kernel_size, 1), 0),
1081
+ )
1082
+ ),
1083
+ norm_f(
1084
+ Conv2d(
1085
+ 32,
1086
+ 128,
1087
+ (kernel_size, 1),
1088
+ (stride, 1),
1089
+ padding=(get_padding(kernel_size, 1), 0),
1090
+ )
1091
+ ),
1092
+ norm_f(
1093
+ Conv2d(
1094
+ 128,
1095
+ 512,
1096
+ (kernel_size, 1),
1097
+ (stride, 1),
1098
+ padding=(get_padding(kernel_size, 1), 0),
1099
+ )
1100
+ ),
1101
+ norm_f(
1102
+ Conv2d(
1103
+ 512,
1104
+ 1024,
1105
+ (kernel_size, 1),
1106
+ (stride, 1),
1107
+ padding=(get_padding(kernel_size, 1), 0),
1108
+ )
1109
+ ),
1110
+ norm_f(
1111
+ Conv2d(
1112
+ 1024,
1113
+ 1024,
1114
+ (kernel_size, 1),
1115
+ 1,
1116
+ padding=(get_padding(kernel_size, 1), 0),
1117
+ )
1118
+ ),
1119
+ ]
1120
+ )
1121
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
1122
+
1123
+ def forward(self, x):
1124
+ fmap = []
1125
+
1126
+ # 1d to 2d
1127
+ b, c, t = x.shape
1128
+ if t % self.period != 0: # pad first
1129
+ n_pad = self.period - (t % self.period)
1130
+ x = F.pad(x, (0, n_pad), "reflect")
1131
+ t = t + n_pad
1132
+ x = x.view(b, c, t // self.period, self.period)
1133
+
1134
+ for l in self.convs:
1135
+ x = l(x)
1136
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
1137
+ fmap.append(x)
1138
+ x = self.conv_post(x)
1139
+ fmap.append(x)
1140
+ x = torch.flatten(x, 1, -1)
1141
+
1142
+ return x, fmap
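# --- editor's sketch (not part of the original commit): a worked example of
# the "1d to 2d" reshape in DiscriminatorP.forward above. The waveform is
# reflect-padded to a multiple of the period, then folded so that samples one
# period apart line up in columns for the 2-D convolutions.
import torch
import torch.nn.functional as F

period = 3
x = torch.randn(1, 1, 10)                  # t = 10 is not divisible by 3
n_pad = period - (x.shape[2] % period)     # 2
x = F.pad(x, (0, n_pad), "reflect")        # t becomes 12
x = x.view(1, 1, x.shape[2] // period, period)
assert x.shape == (1, 1, 4, 3)
# --- end sketch ---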
Bocchi-the-Rock/lib/infer_pack/models_dml.py ADDED
@@ -0,0 +1,1124 @@
1
+ import math
2
+ from time import time as ttime
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+ from lib.infer_pack import modules
7
+ from lib.infer_pack import attentions
8
+ from lib.infer_pack import commons
9
+ from lib.infer_pack.commons import init_weights, get_padding
10
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
13
+ import numpy as np
15
+
16
+
17
+ class TextEncoder256(nn.Module):
18
+ def __init__(
19
+ self,
20
+ out_channels,
21
+ hidden_channels,
22
+ filter_channels,
23
+ n_heads,
24
+ n_layers,
25
+ kernel_size,
26
+ p_dropout,
27
+ f0=True,
28
+ ):
29
+ super().__init__()
30
+ self.out_channels = out_channels
31
+ self.hidden_channels = hidden_channels
32
+ self.filter_channels = filter_channels
33
+ self.n_heads = n_heads
34
+ self.n_layers = n_layers
35
+ self.kernel_size = kernel_size
36
+ self.p_dropout = p_dropout
37
+ self.emb_phone = nn.Linear(256, hidden_channels)
38
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
+ if f0:
40
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
+ self.encoder = attentions.Encoder(
42
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
+ )
44
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
+
46
+ def forward(self, phone, pitch, lengths):
47
+ if pitch is None:
48
+ x = self.emb_phone(phone)
49
+ else:
50
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
+ x = self.lrelu(x)
53
+ x = torch.transpose(x, 1, -1) # [b, h, t]
54
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
+ x.dtype
56
+ )
57
+ x = self.encoder(x * x_mask, x_mask)
58
+ stats = self.proj(x) * x_mask
59
+
60
+ m, logs = torch.split(stats, self.out_channels, dim=1)
61
+ return m, logs, x_mask
62
+
63
+
64
+ class TextEncoder768(nn.Module):
65
+ def __init__(
66
+ self,
67
+ out_channels,
68
+ hidden_channels,
69
+ filter_channels,
70
+ n_heads,
71
+ n_layers,
72
+ kernel_size,
73
+ p_dropout,
74
+ f0=True,
75
+ ):
76
+ super().__init__()
77
+ self.out_channels = out_channels
78
+ self.hidden_channels = hidden_channels
79
+ self.filter_channels = filter_channels
80
+ self.n_heads = n_heads
81
+ self.n_layers = n_layers
82
+ self.kernel_size = kernel_size
83
+ self.p_dropout = p_dropout
84
+ self.emb_phone = nn.Linear(768, hidden_channels)
85
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
+ if f0:
87
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
+ self.encoder = attentions.Encoder(
89
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
+ )
91
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
92
+
93
+ def forward(self, phone, pitch, lengths):
94
+ if pitch is None:
95
+ x = self.emb_phone(phone)
96
+ else:
97
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
+ x = self.lrelu(x)
100
+ x = torch.transpose(x, 1, -1) # [b, h, t]
101
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
+ x.dtype
103
+ )
104
+ x = self.encoder(x * x_mask, x_mask)
105
+ stats = self.proj(x) * x_mask
106
+
107
+ m, logs = torch.split(stats, self.out_channels, dim=1)
108
+ return m, logs, x_mask
109
+
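# --- editor's sketch (not part of the original commit): assumed shapes for
# TextEncoder768 above. It embeds 768-dim HuBERT-style phone features (plus an
# optional coarse pitch index) and emits the prior mean/log-std; the
# constructor arguments below mirror a typical RVC v2 config (assumption).
import torch

enc = TextEncoder768(192, 192, 768, 2, 6, 3, 0.0)
phone = torch.randn(1, 50, 768)            # [batch, frames, features]
pitch = torch.randint(0, 256, (1, 50))     # coarse pitch codes
lengths = torch.LongTensor([50])
m, logs, x_mask = enc(phone, pitch, lengths)
assert m.shape == (1, 192, 50)             # [batch, out_channels, frames]
# --- end sketch ---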
110
+
111
+ class ResidualCouplingBlock(nn.Module):
112
+ def __init__(
113
+ self,
114
+ channels,
115
+ hidden_channels,
116
+ kernel_size,
117
+ dilation_rate,
118
+ n_layers,
119
+ n_flows=4,
120
+ gin_channels=0,
121
+ ):
122
+ super().__init__()
123
+ self.channels = channels
124
+ self.hidden_channels = hidden_channels
125
+ self.kernel_size = kernel_size
126
+ self.dilation_rate = dilation_rate
127
+ self.n_layers = n_layers
128
+ self.n_flows = n_flows
129
+ self.gin_channels = gin_channels
130
+
131
+ self.flows = nn.ModuleList()
132
+ for i in range(n_flows):
133
+ self.flows.append(
134
+ modules.ResidualCouplingLayer(
135
+ channels,
136
+ hidden_channels,
137
+ kernel_size,
138
+ dilation_rate,
139
+ n_layers,
140
+ gin_channels=gin_channels,
141
+ mean_only=True,
142
+ )
143
+ )
144
+ self.flows.append(modules.Flip())
145
+
146
+ def forward(self, x, x_mask, g=None, reverse=False):
147
+ if not reverse:
148
+ for flow in self.flows:
149
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
150
+ else:
151
+ for flow in reversed(self.flows):
152
+ x = flow(x, x_mask, g=g, reverse=reverse)
153
+ return x
154
+
155
+ def remove_weight_norm(self):
156
+ for i in range(self.n_flows):
157
+ self.flows[i * 2].remove_weight_norm()
158
+
159
+
160
+ class PosteriorEncoder(nn.Module):
161
+ def __init__(
162
+ self,
163
+ in_channels,
164
+ out_channels,
165
+ hidden_channels,
166
+ kernel_size,
167
+ dilation_rate,
168
+ n_layers,
169
+ gin_channels=0,
170
+ ):
171
+ super().__init__()
172
+ self.in_channels = in_channels
173
+ self.out_channels = out_channels
174
+ self.hidden_channels = hidden_channels
175
+ self.kernel_size = kernel_size
176
+ self.dilation_rate = dilation_rate
177
+ self.n_layers = n_layers
178
+ self.gin_channels = gin_channels
179
+
180
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
181
+ self.enc = modules.WN(
182
+ hidden_channels,
183
+ kernel_size,
184
+ dilation_rate,
185
+ n_layers,
186
+ gin_channels=gin_channels,
187
+ )
188
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
189
+
190
+ def forward(self, x, x_lengths, g=None):
191
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
192
+ x.dtype
193
+ )
194
+ x = self.pre(x) * x_mask
195
+ x = self.enc(x, x_mask, g=g)
196
+ stats = self.proj(x) * x_mask
197
+ m, logs = torch.split(stats, self.out_channels, dim=1)
198
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
199
+ return z, m, logs, x_mask
200
+
201
+ def remove_weight_norm(self):
202
+ self.enc.remove_weight_norm()
203
+
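# --- editor's sketch (not part of the original commit): the reparameterized
# sample drawn in PosteriorEncoder.forward above, z = (m + eps * exp(logs)) * mask,
# i.e. a Gaussian with mean m and standard deviation exp(logs), zeroed on
# padded frames.
import torch

m = torch.zeros(1, 4, 8)
logs = torch.zeros(1, 4, 8)                # std = exp(0) = 1
x_mask = torch.ones(1, 1, 8)
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
assert z.shape == (1, 4, 8)
# --- end sketch ---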
204
+
205
+ class Generator(torch.nn.Module):
206
+ def __init__(
207
+ self,
208
+ initial_channel,
209
+ resblock,
210
+ resblock_kernel_sizes,
211
+ resblock_dilation_sizes,
212
+ upsample_rates,
213
+ upsample_initial_channel,
214
+ upsample_kernel_sizes,
215
+ gin_channels=0,
216
+ ):
217
+ super(Generator, self).__init__()
218
+ self.num_kernels = len(resblock_kernel_sizes)
219
+ self.num_upsamples = len(upsample_rates)
220
+ self.conv_pre = Conv1d(
221
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
222
+ )
223
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
224
+
225
+ self.ups = nn.ModuleList()
226
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
227
+ self.ups.append(
228
+ weight_norm(
229
+ ConvTranspose1d(
230
+ upsample_initial_channel // (2**i),
231
+ upsample_initial_channel // (2 ** (i + 1)),
232
+ k,
233
+ u,
234
+ padding=(k - u) // 2,
235
+ )
236
+ )
237
+ )
238
+
239
+ self.resblocks = nn.ModuleList()
240
+ for i in range(len(self.ups)):
241
+ ch = upsample_initial_channel // (2 ** (i + 1))
242
+ for j, (k, d) in enumerate(
243
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
244
+ ):
245
+ self.resblocks.append(resblock(ch, k, d))
246
+
247
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
248
+ self.ups.apply(init_weights)
249
+
250
+ if gin_channels != 0:
251
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
252
+
253
+ def forward(self, x, g=None):
254
+ x = self.conv_pre(x)
255
+ if g is not None:
256
+ x = x + self.cond(g)
257
+
258
+ for i in range(self.num_upsamples):
259
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
260
+ x = self.ups[i](x)
261
+ xs = None
262
+ for j in range(self.num_kernels):
263
+ if xs is None:
264
+ xs = self.resblocks[i * self.num_kernels + j](x)
265
+ else:
266
+ xs += self.resblocks[i * self.num_kernels + j](x)
267
+ x = xs / self.num_kernels
268
+ x = F.leaky_relu(x)
269
+ x = self.conv_post(x)
270
+ x = torch.tanh(x)
271
+
272
+ return x
273
+
274
+ def remove_weight_norm(self):
275
+ for l in self.ups:
276
+ remove_weight_norm(l)
277
+ for l in self.resblocks:
278
+ l.remove_weight_norm()
279
+
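# --- editor's sketch (not part of the original commit): the Generator's
# channel/rate schedule. Channels halve at each upsample stage while the
# temporal rate multiplies out to prod(upsample_rates); the concrete schedule
# below is an assumed 48k configuration, not taken from this commit.
import numpy as np

upsample_rates = [10, 6, 2, 2, 2]
upsample_initial_channel = 512
chans = [upsample_initial_channel // (2 ** (i + 1)) for i in range(len(upsample_rates))]
assert chans == [256, 128, 64, 32, 16]
assert int(np.prod(upsample_rates)) == 480   # one latent frame -> 480 samples
# --- end sketch ---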
280
+
281
+ class SineGen(torch.nn.Module):
282
+ """Definition of sine generator
283
+ SineGen(samp_rate, harmonic_num = 0,
284
+ sine_amp = 0.1, noise_std = 0.003,
285
+ voiced_threshold = 0,
286
+ flag_for_pulse=False)
287
+ samp_rate: sampling rate in Hz
288
+ harmonic_num: number of harmonic overtones (default 0)
289
+ sine_amp: amplitude of sine waveform (default 0.1)
290
+ noise_std: std of Gaussian noise (default 0.003)
291
+ voiced_threshold: F0 threshold for U/V classification (default 0)
292
+ flag_for_pulse: this SineGen is used inside PulseGen (default False)
293
+ Note: when flag_for_pulse is True, the first time step of a voiced
294
+ segment is always sin(np.pi) or cos(0)
295
+ """
296
+
297
+ def __init__(
298
+ self,
299
+ samp_rate,
300
+ harmonic_num=0,
301
+ sine_amp=0.1,
302
+ noise_std=0.003,
303
+ voiced_threshold=0,
304
+ flag_for_pulse=False,
305
+ ):
306
+ super(SineGen, self).__init__()
307
+ self.sine_amp = sine_amp
308
+ self.noise_std = noise_std
309
+ self.harmonic_num = harmonic_num
310
+ self.dim = self.harmonic_num + 1
311
+ self.sampling_rate = samp_rate
312
+ self.voiced_threshold = voiced_threshold
313
+
314
+ def _f02uv(self, f0):
315
+ # generate uv signal
316
+ uv = torch.ones_like(f0)
317
+ uv = uv * (f0 > self.voiced_threshold)
318
+ return uv.float()
319
+
320
+ def forward(self, f0, upp):
321
+ """sine_tensor, uv = forward(f0)
322
+ input F0: tensor(batchsize=1, length, dim=1)
323
+ f0 for unvoiced steps should be 0
324
+ output sine_tensor: tensor(batchsize=1, length, dim)
325
+ output uv: tensor(batchsize=1, length, 1)
326
+ """
327
+ with torch.no_grad():
328
+ f0 = f0[:, None].transpose(1, 2)
329
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
330
+ # fundamental component
331
+ f0_buf[:, :, 0] = f0[:, :, 0]
332
+ for idx in np.arange(self.harmonic_num):
333
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
334
+ idx + 2
335
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
336
+ rad_values = (f0_buf / self.sampling_rate) % 1 # the %1 means the n_har products cannot be optimized in post-processing
337
+ rand_ini = torch.rand(
338
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
339
+ )
340
+ rand_ini[:, 0] = 0
341
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
342
+ tmp_over_one = torch.cumsum(rad_values, 1) # % 1 # taking %1 here would make the cumsum below impossible to optimize
343
+ tmp_over_one *= upp
344
+ tmp_over_one = F.interpolate(
345
+ tmp_over_one.transpose(2, 1),
346
+ scale_factor=upp,
347
+ mode="linear",
348
+ align_corners=True,
349
+ ).transpose(2, 1)
350
+ rad_values = F.interpolate(
351
+ rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
352
+ ).transpose(
353
+ 2, 1
354
+ ) #######
355
+ tmp_over_one %= 1
356
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
357
+ cumsum_shift = torch.zeros_like(rad_values)
358
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
359
+ sine_waves = torch.sin(
360
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
361
+ )
362
+ sine_waves = sine_waves * self.sine_amp
363
+ uv = self._f02uv(f0)
364
+ uv = F.interpolate(
365
+ uv.transpose(2, 1), scale_factor=upp, mode="nearest"
366
+ ).transpose(2, 1)
367
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
368
+ noise = noise_amp * torch.randn_like(sine_waves)
369
+ sine_waves = sine_waves * uv + noise
370
+ return sine_waves, uv, noise
371
+
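# --- editor's sketch (not part of the original commit): assumed usage of
# SineGen above. A frame-rate F0 contour becomes a sample-rate sine
# excitation; `upp` is the total upsampling factor, so 100 frames at upp=480
# yield 48000 excitation samples.
import torch

sine_gen = SineGen(samp_rate=48000, harmonic_num=0)
f0 = torch.full((1, 100), 220.0)        # [batch, frames], constant 220 Hz
sine, uv, noise = sine_gen(f0, upp=480)
assert sine.shape == (1, 48000, 1)      # harmonic_num=0 -> dim == 1
# --- end sketch ---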
372
+
373
+ class SourceModuleHnNSF(torch.nn.Module):
374
+ """SourceModule for hn-nsf
375
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
376
+ add_noise_std=0.003, voiced_threshod=0)
377
+ sampling_rate: sampling_rate in Hz
378
+ harmonic_num: number of harmonics above F0 (default: 0)
379
+ sine_amp: amplitude of sine source signal (default: 0.1)
380
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
381
+ note that amplitude of noise in unvoiced is decided
382
+ by sine_amp
383
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
384
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
385
+ F0_sampled (batchsize, length, 1)
386
+ Sine_source (batchsize, length, 1)
387
+ noise_source (batchsize, length, 1)
388
+ uv (batchsize, length, 1)
389
+ """
390
+
391
+ def __init__(
392
+ self,
393
+ sampling_rate,
394
+ harmonic_num=0,
395
+ sine_amp=0.1,
396
+ add_noise_std=0.003,
397
+ voiced_threshod=0,
398
+ is_half=True,
399
+ ):
400
+ super(SourceModuleHnNSF, self).__init__()
401
+
402
+ self.sine_amp = sine_amp
403
+ self.noise_std = add_noise_std
404
+ self.is_half = is_half
405
+ # to produce sine waveforms
406
+ self.l_sin_gen = SineGen(
407
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
408
+ )
409
+
410
+ # to merge source harmonics into a single excitation
411
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
412
+ self.l_tanh = torch.nn.Tanh()
413
+
414
+ def forward(self, x, upp=None):
415
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
416
+ if self.is_half:
417
+ sine_wavs = sine_wavs.half()
418
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
419
+ return sine_merge, None, None # noise, uv
420
+
421
+
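# --- editor's sketch (not part of the original commit): assumed usage of
# SourceModuleHnNSF above. The sine harmonics from SineGen are mixed down to a
# single excitation channel by a learned linear layer followed by tanh.
import torch

src = SourceModuleHnNSF(sampling_rate=48000, harmonic_num=0, is_half=False)
f0 = torch.full((1, 100), 220.0)
sine_merge, _, _ = src(f0, upp=480)
assert sine_merge.shape == (1, 48000, 1)
# --- end sketch ---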
422
+ class GeneratorNSF(torch.nn.Module):
423
+ def __init__(
424
+ self,
425
+ initial_channel,
426
+ resblock,
427
+ resblock_kernel_sizes,
428
+ resblock_dilation_sizes,
429
+ upsample_rates,
430
+ upsample_initial_channel,
431
+ upsample_kernel_sizes,
432
+ gin_channels,
433
+ sr,
434
+ is_half=False,
435
+ ):
436
+ super(GeneratorNSF, self).__init__()
437
+ self.num_kernels = len(resblock_kernel_sizes)
438
+ self.num_upsamples = len(upsample_rates)
439
+
440
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
441
+ self.m_source = SourceModuleHnNSF(
442
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
443
+ )
444
+ self.noise_convs = nn.ModuleList()
445
+ self.conv_pre = Conv1d(
446
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
447
+ )
448
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
449
+
450
+ self.ups = nn.ModuleList()
451
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
452
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
453
+ self.ups.append(
454
+ weight_norm(
455
+ ConvTranspose1d(
456
+ upsample_initial_channel // (2**i),
457
+ upsample_initial_channel // (2 ** (i + 1)),
458
+ k,
459
+ u,
460
+ padding=(k - u) // 2,
461
+ )
462
+ )
463
+ )
464
+ if i + 1 < len(upsample_rates):
465
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
466
+ self.noise_convs.append(
467
+ Conv1d(
468
+ 1,
469
+ c_cur,
470
+ kernel_size=stride_f0 * 2,
471
+ stride=stride_f0,
472
+ padding=stride_f0 // 2,
473
+ )
474
+ )
475
+ else:
476
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
477
+
478
+ self.resblocks = nn.ModuleList()
479
+ for i in range(len(self.ups)):
480
+ ch = upsample_initial_channel // (2 ** (i + 1))
481
+ for j, (k, d) in enumerate(
482
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
483
+ ):
484
+ self.resblocks.append(resblock(ch, k, d))
485
+
486
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
487
+ self.ups.apply(init_weights)
488
+
489
+ if gin_channels != 0:
490
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
491
+
492
+ self.upp = np.prod(upsample_rates)
493
+
494
+ def forward(self, x, f0, g=None):
495
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
496
+ har_source = har_source.transpose(1, 2)
497
+ x = self.conv_pre(x)
498
+ if g is not None:
499
+ x = x + self.cond(g)
500
+
501
+ for i in range(self.num_upsamples):
502
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
503
+ x = self.ups[i](x)
504
+ x_source = self.noise_convs[i](har_source)
505
+ x = x + x_source
506
+ xs = None
507
+ for j in range(self.num_kernels):
508
+ if xs is None:
509
+ xs = self.resblocks[i * self.num_kernels + j](x)
510
+ else:
511
+ xs += self.resblocks[i * self.num_kernels + j](x)
512
+ x = xs / self.num_kernels
513
+ x = F.leaky_relu(x)
514
+ x = self.conv_post(x)
515
+ x = torch.tanh(x)
516
+ return x
517
+
518
+ def remove_weight_norm(self):
519
+ for l in self.ups:
520
+ remove_weight_norm(l)
521
+ for l in self.resblocks:
522
+ l.remove_weight_norm()
523
+
524
+
525
+ sr2sr = {
526
+ "32k": 32000,
527
+ "40k": 40000,
528
+ "48k": 48000,
529
+ }
530
+
531
+
532
+ class SynthesizerTrnMs256NSFsid(nn.Module):
533
+ def __init__(
534
+ self,
535
+ spec_channels,
536
+ segment_size,
537
+ inter_channels,
538
+ hidden_channels,
539
+ filter_channels,
540
+ n_heads,
541
+ n_layers,
542
+ kernel_size,
543
+ p_dropout,
544
+ resblock,
545
+ resblock_kernel_sizes,
546
+ resblock_dilation_sizes,
547
+ upsample_rates,
548
+ upsample_initial_channel,
549
+ upsample_kernel_sizes,
550
+ spk_embed_dim,
551
+ gin_channels,
552
+ sr,
553
+ **kwargs
554
+ ):
555
+ super().__init__()
556
+ if isinstance(sr, str):
557
+ sr = sr2sr[sr]
558
+ self.spec_channels = spec_channels
559
+ self.inter_channels = inter_channels
560
+ self.hidden_channels = hidden_channels
561
+ self.filter_channels = filter_channels
562
+ self.n_heads = n_heads
563
+ self.n_layers = n_layers
564
+ self.kernel_size = kernel_size
565
+ self.p_dropout = p_dropout
566
+ self.resblock = resblock
567
+ self.resblock_kernel_sizes = resblock_kernel_sizes
568
+ self.resblock_dilation_sizes = resblock_dilation_sizes
569
+ self.upsample_rates = upsample_rates
570
+ self.upsample_initial_channel = upsample_initial_channel
571
+ self.upsample_kernel_sizes = upsample_kernel_sizes
572
+ self.segment_size = segment_size
573
+ self.gin_channels = gin_channels
574
+ # self.hop_length = hop_length#
575
+ self.spk_embed_dim = spk_embed_dim
576
+ self.enc_p = TextEncoder256(
577
+ inter_channels,
578
+ hidden_channels,
579
+ filter_channels,
580
+ n_heads,
581
+ n_layers,
582
+ kernel_size,
583
+ p_dropout,
584
+ )
585
+ self.dec = GeneratorNSF(
586
+ inter_channels,
587
+ resblock,
588
+ resblock_kernel_sizes,
589
+ resblock_dilation_sizes,
590
+ upsample_rates,
591
+ upsample_initial_channel,
592
+ upsample_kernel_sizes,
593
+ gin_channels=gin_channels,
594
+ sr=sr,
595
+ is_half=kwargs["is_half"],
596
+ )
597
+ self.enc_q = PosteriorEncoder(
598
+ spec_channels,
599
+ inter_channels,
600
+ hidden_channels,
601
+ 5,
602
+ 1,
603
+ 16,
604
+ gin_channels=gin_channels,
605
+ )
606
+ self.flow = ResidualCouplingBlock(
607
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
608
+ )
609
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
610
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
611
+
612
+ def remove_weight_norm(self):
613
+ self.dec.remove_weight_norm()
614
+ self.flow.remove_weight_norm()
615
+ self.enc_q.remove_weight_norm()
616
+
617
+ def forward(
618
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
619
+ ): # ds is the speaker id, shape [bs, 1]
620
+ # print(1,pitch.shape)#[bs,t]
621
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis, broadcast
622
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
623
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
624
+ z_p = self.flow(z, y_mask, g=g)
625
+ z_slice, ids_slice = commons.rand_slice_segments(
626
+ z, y_lengths, self.segment_size
627
+ )
628
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
629
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
630
+ # print(-2,pitchf.shape,z_slice.shape)
631
+ o = self.dec(z_slice, pitchf, g=g)
632
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
633
+
634
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
635
+ g = self.emb_g(sid).unsqueeze(-1)
636
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
637
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
638
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
639
+ o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
640
+ return o, x_mask, (z, z_p, m_p, logs_p)
641
+
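# --- editor's note (not part of the original commit): unlike the `rate`
# variant shown earlier in this migration, this DML build trims with `max_len`
# and keeps the *leading* decoded frames rather than the trailing ones.
import torch

z = torch.randn(1, 192, 100)              # assumed latent shape
max_len = 40
assert z[:, :, :max_len].shape[2] == 40   # head of the sequence is kept
# --- end sketch ---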
642
+
643
+ class SynthesizerTrnMs768NSFsid(nn.Module):
644
+ def __init__(
645
+ self,
646
+ spec_channels,
647
+ segment_size,
648
+ inter_channels,
649
+ hidden_channels,
650
+ filter_channels,
651
+ n_heads,
652
+ n_layers,
653
+ kernel_size,
654
+ p_dropout,
655
+ resblock,
656
+ resblock_kernel_sizes,
657
+ resblock_dilation_sizes,
658
+ upsample_rates,
659
+ upsample_initial_channel,
660
+ upsample_kernel_sizes,
661
+ spk_embed_dim,
662
+ gin_channels,
663
+ sr,
664
+ **kwargs
665
+ ):
666
+ super().__init__()
667
+ if isinstance(sr, str):
668
+ sr = sr2sr[sr]
669
+ self.spec_channels = spec_channels
670
+ self.inter_channels = inter_channels
671
+ self.hidden_channels = hidden_channels
672
+ self.filter_channels = filter_channels
673
+ self.n_heads = n_heads
674
+ self.n_layers = n_layers
675
+ self.kernel_size = kernel_size
676
+ self.p_dropout = p_dropout
677
+ self.resblock = resblock
678
+ self.resblock_kernel_sizes = resblock_kernel_sizes
679
+ self.resblock_dilation_sizes = resblock_dilation_sizes
680
+ self.upsample_rates = upsample_rates
681
+ self.upsample_initial_channel = upsample_initial_channel
682
+ self.upsample_kernel_sizes = upsample_kernel_sizes
683
+ self.segment_size = segment_size
684
+ self.gin_channels = gin_channels
685
+ # self.hop_length = hop_length#
686
+ self.spk_embed_dim = spk_embed_dim
687
+ self.enc_p = TextEncoder768(
688
+ inter_channels,
689
+ hidden_channels,
690
+ filter_channels,
691
+ n_heads,
692
+ n_layers,
693
+ kernel_size,
694
+ p_dropout,
695
+ )
696
+ self.dec = GeneratorNSF(
697
+ inter_channels,
698
+ resblock,
699
+ resblock_kernel_sizes,
700
+ resblock_dilation_sizes,
701
+ upsample_rates,
702
+ upsample_initial_channel,
703
+ upsample_kernel_sizes,
704
+ gin_channels=gin_channels,
705
+ sr=sr,
706
+ is_half=kwargs["is_half"],
707
+ )
708
+ self.enc_q = PosteriorEncoder(
709
+ spec_channels,
710
+ inter_channels,
711
+ hidden_channels,
712
+ 5,
713
+ 1,
714
+ 16,
715
+ gin_channels=gin_channels,
716
+ )
717
+ self.flow = ResidualCouplingBlock(
718
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
719
+ )
720
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
721
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
722
+
723
+ def remove_weight_norm(self):
724
+ self.dec.remove_weight_norm()
725
+ self.flow.remove_weight_norm()
726
+ self.enc_q.remove_weight_norm()
727
+
728
+ def forward(
729
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
730
+ ): # ds is the speaker id, shape [bs, 1]
731
+ # print(1,pitch.shape)#[bs,t]
732
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis, broadcast
733
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
734
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
735
+ z_p = self.flow(z, y_mask, g=g)
736
+ z_slice, ids_slice = commons.rand_slice_segments(
737
+ z, y_lengths, self.segment_size
738
+ )
739
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
740
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
741
+ # print(-2,pitchf.shape,z_slice.shape)
742
+ o = self.dec(z_slice, pitchf, g=g)
743
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
744
+
745
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
746
+ g = self.emb_g(sid).unsqueeze(-1)
747
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
748
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
749
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
750
+ o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
751
+ return o, x_mask, (z, z_p, m_p, logs_p)
752
+
753
+
754
+ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
755
+ def __init__(
756
+ self,
757
+ spec_channels,
758
+ segment_size,
759
+ inter_channels,
760
+ hidden_channels,
761
+ filter_channels,
762
+ n_heads,
763
+ n_layers,
764
+ kernel_size,
765
+ p_dropout,
766
+ resblock,
767
+ resblock_kernel_sizes,
768
+ resblock_dilation_sizes,
769
+ upsample_rates,
770
+ upsample_initial_channel,
771
+ upsample_kernel_sizes,
772
+ spk_embed_dim,
773
+ gin_channels,
774
+ sr=None,
775
+ **kwargs
776
+ ):
777
+ super().__init__()
778
+ self.spec_channels = spec_channels
779
+ self.inter_channels = inter_channels
780
+ self.hidden_channels = hidden_channels
781
+ self.filter_channels = filter_channels
782
+ self.n_heads = n_heads
783
+ self.n_layers = n_layers
784
+ self.kernel_size = kernel_size
785
+ self.p_dropout = p_dropout
786
+ self.resblock = resblock
787
+ self.resblock_kernel_sizes = resblock_kernel_sizes
788
+ self.resblock_dilation_sizes = resblock_dilation_sizes
789
+ self.upsample_rates = upsample_rates
790
+ self.upsample_initial_channel = upsample_initial_channel
791
+ self.upsample_kernel_sizes = upsample_kernel_sizes
792
+ self.segment_size = segment_size
793
+ self.gin_channels = gin_channels
794
+ # self.hop_length = hop_length#
795
+ self.spk_embed_dim = spk_embed_dim
796
+ self.enc_p = TextEncoder256(
797
+ inter_channels,
798
+ hidden_channels,
799
+ filter_channels,
800
+ n_heads,
801
+ n_layers,
802
+ kernel_size,
803
+ p_dropout,
804
+ f0=False,
805
+ )
806
+ self.dec = Generator(
807
+ inter_channels,
808
+ resblock,
809
+ resblock_kernel_sizes,
810
+ resblock_dilation_sizes,
811
+ upsample_rates,
812
+ upsample_initial_channel,
813
+ upsample_kernel_sizes,
814
+ gin_channels=gin_channels,
815
+ )
816
+ self.enc_q = PosteriorEncoder(
817
+ spec_channels,
818
+ inter_channels,
819
+ hidden_channels,
820
+ 5,
821
+ 1,
822
+ 16,
823
+ gin_channels=gin_channels,
824
+ )
825
+ self.flow = ResidualCouplingBlock(
826
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
827
+ )
828
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
829
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
830
+
831
+ def remove_weight_norm(self):
832
+ self.dec.remove_weight_norm()
833
+ self.flow.remove_weight_norm()
834
+ self.enc_q.remove_weight_norm()
835
+
836
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, shape [bs, 1]
837
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis, broadcast
838
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
839
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
840
+ z_p = self.flow(z, y_mask, g=g)
841
+ z_slice, ids_slice = commons.rand_slice_segments(
842
+ z, y_lengths, self.segment_size
843
+ )
844
+ o = self.dec(z_slice, g=g)
845
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
846
+
847
+ def infer(self, phone, phone_lengths, sid, max_len=None):
848
+ g = self.emb_g(sid).unsqueeze(-1)
849
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
850
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
851
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
852
+ o = self.dec((z * x_mask)[:, :, :max_len], g=g)
853
+ return o, x_mask, (z, z_p, m_p, logs_p)
854
+
855
+
856
+ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
857
+ def __init__(
858
+ self,
859
+ spec_channels,
860
+ segment_size,
861
+ inter_channels,
862
+ hidden_channels,
863
+ filter_channels,
864
+ n_heads,
865
+ n_layers,
866
+ kernel_size,
867
+ p_dropout,
868
+ resblock,
869
+ resblock_kernel_sizes,
870
+ resblock_dilation_sizes,
871
+ upsample_rates,
872
+ upsample_initial_channel,
873
+ upsample_kernel_sizes,
874
+ spk_embed_dim,
875
+ gin_channels,
876
+ sr=None,
877
+ **kwargs
878
+ ):
879
+ super().__init__()
880
+ self.spec_channels = spec_channels
881
+ self.inter_channels = inter_channels
882
+ self.hidden_channels = hidden_channels
883
+ self.filter_channels = filter_channels
884
+ self.n_heads = n_heads
885
+ self.n_layers = n_layers
886
+ self.kernel_size = kernel_size
887
+ self.p_dropout = p_dropout
888
+ self.resblock = resblock
889
+ self.resblock_kernel_sizes = resblock_kernel_sizes
890
+ self.resblock_dilation_sizes = resblock_dilation_sizes
891
+ self.upsample_rates = upsample_rates
892
+ self.upsample_initial_channel = upsample_initial_channel
893
+ self.upsample_kernel_sizes = upsample_kernel_sizes
894
+ self.segment_size = segment_size
895
+ self.gin_channels = gin_channels
896
+ # self.hop_length = hop_length#
897
+ self.spk_embed_dim = spk_embed_dim
898
+ self.enc_p = TextEncoder768(
899
+ inter_channels,
900
+ hidden_channels,
901
+ filter_channels,
902
+ n_heads,
903
+ n_layers,
904
+ kernel_size,
905
+ p_dropout,
906
+ f0=False,
907
+ )
908
+ self.dec = Generator(
909
+ inter_channels,
910
+ resblock,
911
+ resblock_kernel_sizes,
912
+ resblock_dilation_sizes,
913
+ upsample_rates,
914
+ upsample_initial_channel,
915
+ upsample_kernel_sizes,
916
+ gin_channels=gin_channels,
917
+ )
918
+ self.enc_q = PosteriorEncoder(
919
+ spec_channels,
920
+ inter_channels,
921
+ hidden_channels,
922
+ 5,
923
+ 1,
924
+ 16,
925
+ gin_channels=gin_channels,
926
+ )
927
+ self.flow = ResidualCouplingBlock(
928
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
929
+ )
930
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
931
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
932
+
933
+ def remove_weight_norm(self):
934
+ self.dec.remove_weight_norm()
935
+ self.flow.remove_weight_norm()
936
+ self.enc_q.remove_weight_norm()
937
+
938
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, shape [bs, 1]
939
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis, broadcast
940
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
941
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
942
+ z_p = self.flow(z, y_mask, g=g)
943
+ z_slice, ids_slice = commons.rand_slice_segments(
944
+ z, y_lengths, self.segment_size
945
+ )
946
+ o = self.dec(z_slice, g=g)
947
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
948
+
949
+ def infer(self, phone, phone_lengths, sid, max_len=None):
950
+ g = self.emb_g(sid).unsqueeze(-1)
951
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
952
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
953
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
954
+ o = self.dec((z * x_mask)[:, :, :max_len], g=g)
955
+ return o, x_mask, (z, z_p, m_p, logs_p)
956
+
957
+
958
+ class MultiPeriodDiscriminator(torch.nn.Module):
959
+ def __init__(self, use_spectral_norm=False):
960
+ super(MultiPeriodDiscriminator, self).__init__()
961
+ periods = [2, 3, 5, 7, 11, 17]
962
+ # periods = [3, 5, 7, 11, 17, 23, 37]
963
+
964
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
965
+ discs = discs + [
966
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
967
+ ]
968
+ self.discriminators = nn.ModuleList(discs)
969
+
970
+ def forward(self, y, y_hat):
971
+ y_d_rs = [] #
972
+ y_d_gs = []
973
+ fmap_rs = []
974
+ fmap_gs = []
975
+ for i, d in enumerate(self.discriminators):
976
+ y_d_r, fmap_r = d(y)
977
+ y_d_g, fmap_g = d(y_hat)
978
+ # for j in range(len(fmap_r)):
979
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
980
+ y_d_rs.append(y_d_r)
981
+ y_d_gs.append(y_d_g)
982
+ fmap_rs.append(fmap_r)
983
+ fmap_gs.append(fmap_g)
984
+
985
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
986
+
987
+
988
+ class MultiPeriodDiscriminatorV2(torch.nn.Module):
989
+ def __init__(self, use_spectral_norm=False):
990
+ super(MultiPeriodDiscriminatorV2, self).__init__()
991
+ # periods = [2, 3, 5, 7, 11, 17]
992
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
993
+
994
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
995
+ discs = discs + [
996
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
997
+ ]
998
+ self.discriminators = nn.ModuleList(discs)
999
+
1000
+ def forward(self, y, y_hat):
1001
+ y_d_rs = [] #
1002
+ y_d_gs = []
1003
+ fmap_rs = []
1004
+ fmap_gs = []
1005
+ for i, d in enumerate(self.discriminators):
1006
+ y_d_r, fmap_r = d(y)
1007
+ y_d_g, fmap_g = d(y_hat)
1008
+ # for j in range(len(fmap_r)):
1009
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
1010
+ y_d_rs.append(y_d_r)
1011
+ y_d_gs.append(y_d_g)
1012
+ fmap_rs.append(fmap_r)
1013
+ fmap_gs.append(fmap_g)
1014
+
1015
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1016
+
1017
+
1018
+ class DiscriminatorS(torch.nn.Module):
1019
+ def __init__(self, use_spectral_norm=False):
1020
+ super(DiscriminatorS, self).__init__()
1021
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
1022
+ self.convs = nn.ModuleList(
1023
+ [
1024
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
1025
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
1026
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
1027
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
1028
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
1029
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
1030
+ ]
1031
+ )
1032
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
1033
+
1034
+ def forward(self, x):
1035
+ fmap = []
1036
+
1037
+ for l in self.convs:
1038
+ x = l(x)
1039
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
1040
+ fmap.append(x)
1041
+ x = self.conv_post(x)
1042
+ fmap.append(x)
1043
+ x = torch.flatten(x, 1, -1)
1044
+
1045
+ return x, fmap
1046
+
1047
+
1048
+ class DiscriminatorP(torch.nn.Module):
1049
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
1050
+ super(DiscriminatorP, self).__init__()
1051
+ self.period = period
1052
+ self.use_spectral_norm = use_spectral_norm
1053
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
1054
+ self.convs = nn.ModuleList(
1055
+ [
1056
+ norm_f(
1057
+ Conv2d(
1058
+ 1,
1059
+ 32,
1060
+ (kernel_size, 1),
1061
+ (stride, 1),
1062
+ padding=(get_padding(kernel_size, 1), 0),
1063
+ )
1064
+ ),
1065
+ norm_f(
1066
+ Conv2d(
1067
+ 32,
1068
+ 128,
1069
+ (kernel_size, 1),
1070
+ (stride, 1),
1071
+ padding=(get_padding(kernel_size, 1), 0),
1072
+ )
1073
+ ),
1074
+ norm_f(
1075
+ Conv2d(
1076
+ 128,
1077
+ 512,
1078
+ (kernel_size, 1),
1079
+ (stride, 1),
1080
+ padding=(get_padding(kernel_size, 1), 0),
1081
+ )
1082
+ ),
1083
+ norm_f(
1084
+ Conv2d(
1085
+ 512,
1086
+ 1024,
1087
+ (kernel_size, 1),
1088
+ (stride, 1),
1089
+ padding=(get_padding(kernel_size, 1), 0),
1090
+ )
1091
+ ),
1092
+ norm_f(
1093
+ Conv2d(
1094
+ 1024,
1095
+ 1024,
1096
+ (kernel_size, 1),
1097
+ 1,
1098
+ padding=(get_padding(kernel_size, 1), 0),
1099
+ )
1100
+ ),
1101
+ ]
1102
+ )
1103
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
1104
+
1105
+ def forward(self, x):
1106
+ fmap = []
1107
+
1108
+ # 1d to 2d
1109
+ b, c, t = x.shape
1110
+ if t % self.period != 0: # pad first
1111
+ n_pad = self.period - (t % self.period)
1112
+ x = F.pad(x, (0, n_pad), "reflect")
1113
+ t = t + n_pad
1114
+ x = x.view(b, c, t // self.period, self.period)
1115
+
1116
+ for l in self.convs:
1117
+ x = l(x)
1118
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
1119
+ fmap.append(x)
1120
+ x = self.conv_post(x)
1121
+ fmap.append(x)
1122
+ x = torch.flatten(x, 1, -1)
1123
+
1124
+ return x, fmap
Bocchi-the-Rock/lib/infer_pack/models_onnx.py ADDED
@@ -0,0 +1,819 @@
1
+ import math
2
+ from time import time as ttime
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+ from lib.infer_pack import modules
7
+ from lib.infer_pack import attentions
8
+ from lib.infer_pack import commons
9
+ from lib.infer_pack.commons import init_weights, get_padding
10
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
13
+ import numpy as np
15
+
16
+
17
+ class TextEncoder256(nn.Module):
18
+ def __init__(
19
+ self,
20
+ out_channels,
21
+ hidden_channels,
22
+ filter_channels,
23
+ n_heads,
24
+ n_layers,
25
+ kernel_size,
26
+ p_dropout,
27
+ f0=True,
28
+ ):
29
+ super().__init__()
30
+ self.out_channels = out_channels
31
+ self.hidden_channels = hidden_channels
32
+ self.filter_channels = filter_channels
33
+ self.n_heads = n_heads
34
+ self.n_layers = n_layers
35
+ self.kernel_size = kernel_size
36
+ self.p_dropout = p_dropout
37
+ self.emb_phone = nn.Linear(256, hidden_channels)
38
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
+ if f0 == True:
40
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
+ self.encoder = attentions.Encoder(
42
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
+ )
44
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
+
46
+ def forward(self, phone, pitch, lengths):
47
+ if pitch == None:
48
+ x = self.emb_phone(phone)
49
+ else:
50
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
+ x = self.lrelu(x)
53
+ x = torch.transpose(x, 1, -1) # [b, h, t]
54
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
+ x.dtype
56
+ )
57
+ x = self.encoder(x * x_mask, x_mask)
58
+ stats = self.proj(x) * x_mask
59
+
60
+ m, logs = torch.split(stats, self.out_channels, dim=1)
61
+ return m, logs, x_mask
62
+
63
+
64
+ class TextEncoder768(nn.Module):
65
+ def __init__(
66
+ self,
67
+ out_channels,
68
+ hidden_channels,
69
+ filter_channels,
70
+ n_heads,
71
+ n_layers,
72
+ kernel_size,
73
+ p_dropout,
74
+ f0=True,
75
+ ):
76
+ super().__init__()
77
+ self.out_channels = out_channels
78
+ self.hidden_channels = hidden_channels
79
+ self.filter_channels = filter_channels
80
+ self.n_heads = n_heads
81
+ self.n_layers = n_layers
82
+ self.kernel_size = kernel_size
83
+ self.p_dropout = p_dropout
84
+ self.emb_phone = nn.Linear(768, hidden_channels)
85
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
+ if f0 == True:
87
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
+ self.encoder = attentions.Encoder(
89
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
+ )
91
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
92
+
93
+ def forward(self, phone, pitch, lengths):
94
+ if pitch == None:
95
+ x = self.emb_phone(phone)
96
+ else:
97
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
+ x = self.lrelu(x)
100
+ x = torch.transpose(x, 1, -1) # [b, h, t]
101
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
+ x.dtype
103
+ )
104
+ x = self.encoder(x * x_mask, x_mask)
105
+ stats = self.proj(x) * x_mask
106
+
107
+ m, logs = torch.split(stats, self.out_channels, dim=1)
108
+ return m, logs, x_mask
109
+
110
+
111
+ class ResidualCouplingBlock(nn.Module):
112
+ def __init__(
113
+ self,
114
+ channels,
115
+ hidden_channels,
116
+ kernel_size,
117
+ dilation_rate,
118
+ n_layers,
119
+ n_flows=4,
120
+ gin_channels=0,
121
+ ):
122
+ super().__init__()
123
+ self.channels = channels
124
+ self.hidden_channels = hidden_channels
125
+ self.kernel_size = kernel_size
126
+ self.dilation_rate = dilation_rate
127
+ self.n_layers = n_layers
128
+ self.n_flows = n_flows
129
+ self.gin_channels = gin_channels
130
+
131
+ self.flows = nn.ModuleList()
132
+ for i in range(n_flows):
133
+ self.flows.append(
134
+ modules.ResidualCouplingLayer(
135
+ channels,
136
+ hidden_channels,
137
+ kernel_size,
138
+ dilation_rate,
139
+ n_layers,
140
+ gin_channels=gin_channels,
141
+ mean_only=True,
142
+ )
143
+ )
144
+ self.flows.append(modules.Flip())
145
+
146
+ def forward(self, x, x_mask, g=None, reverse=False):
147
+ if not reverse:
148
+ for flow in self.flows:
149
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
150
+ else:
151
+ for flow in reversed(self.flows):
152
+ x = flow(x, x_mask, g=g, reverse=reverse)
153
+ return x
154
+
155
+ def remove_weight_norm(self):
156
+ for i in range(self.n_flows):
157
+ self.flows[i * 2].remove_weight_norm()
158
+
159
+
160
+ class PosteriorEncoder(nn.Module):
161
+ def __init__(
162
+ self,
163
+ in_channels,
164
+ out_channels,
165
+ hidden_channels,
166
+ kernel_size,
167
+ dilation_rate,
168
+ n_layers,
169
+ gin_channels=0,
170
+ ):
171
+ super().__init__()
172
+ self.in_channels = in_channels
173
+ self.out_channels = out_channels
174
+ self.hidden_channels = hidden_channels
175
+ self.kernel_size = kernel_size
176
+ self.dilation_rate = dilation_rate
177
+ self.n_layers = n_layers
178
+ self.gin_channels = gin_channels
179
+
180
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
181
+ self.enc = modules.WN(
182
+ hidden_channels,
183
+ kernel_size,
184
+ dilation_rate,
185
+ n_layers,
186
+ gin_channels=gin_channels,
187
+ )
188
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
189
+
190
+ def forward(self, x, x_lengths, g=None):
191
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
192
+ x.dtype
193
+ )
194
+ x = self.pre(x) * x_mask
195
+ x = self.enc(x, x_mask, g=g)
196
+ stats = self.proj(x) * x_mask
197
+ m, logs = torch.split(stats, self.out_channels, dim=1)
198
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
199
+ return z, m, logs, x_mask
200
+
201
+ def remove_weight_norm(self):
202
+ self.enc.remove_weight_norm()
203
+
204
+
205
+ class Generator(torch.nn.Module):
206
+ def __init__(
207
+ self,
208
+ initial_channel,
209
+ resblock,
210
+ resblock_kernel_sizes,
211
+ resblock_dilation_sizes,
212
+ upsample_rates,
213
+ upsample_initial_channel,
214
+ upsample_kernel_sizes,
215
+ gin_channels=0,
216
+ ):
217
+ super(Generator, self).__init__()
218
+ self.num_kernels = len(resblock_kernel_sizes)
219
+ self.num_upsamples = len(upsample_rates)
220
+ self.conv_pre = Conv1d(
221
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
222
+ )
223
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
224
+
225
+ self.ups = nn.ModuleList()
226
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
227
+ self.ups.append(
228
+ weight_norm(
229
+ ConvTranspose1d(
230
+ upsample_initial_channel // (2**i),
231
+ upsample_initial_channel // (2 ** (i + 1)),
232
+ k,
233
+ u,
234
+ padding=(k - u) // 2,
235
+ )
236
+ )
237
+ )
238
+
239
+ self.resblocks = nn.ModuleList()
240
+ for i in range(len(self.ups)):
241
+ ch = upsample_initial_channel // (2 ** (i + 1))
242
+ for j, (k, d) in enumerate(
243
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
244
+ ):
245
+ self.resblocks.append(resblock(ch, k, d))
246
+
247
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
248
+ self.ups.apply(init_weights)
249
+
250
+ if gin_channels != 0:
251
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
252
+
253
+ def forward(self, x, g=None):
254
+ x = self.conv_pre(x)
255
+ if g is not None:
256
+ x = x + self.cond(g)
257
+
258
+ for i in range(self.num_upsamples):
259
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
260
+ x = self.ups[i](x)
261
+ xs = None
262
+ for j in range(self.num_kernels):
263
+ if xs is None:
264
+ xs = self.resblocks[i * self.num_kernels + j](x)
265
+ else:
266
+ xs += self.resblocks[i * self.num_kernels + j](x)
267
+ x = xs / self.num_kernels
268
+ x = F.leaky_relu(x)
269
+ x = self.conv_post(x)
270
+ x = torch.tanh(x)
271
+
272
+ return x
273
+
274
+ def remove_weight_norm(self):
275
+ for l in self.ups:
276
+ remove_weight_norm(l)
277
+ for l in self.resblocks:
278
+ l.remove_weight_norm()
279
+
280
+
281
+ class SineGen(torch.nn.Module):
282
+ """Definition of sine generator
283
+ SineGen(samp_rate, harmonic_num = 0,
284
+ sine_amp = 0.1, noise_std = 0.003,
285
+ voiced_threshold = 0,
286
+ flag_for_pulse=False)
287
+ samp_rate: sampling rate in Hz
288
+ harmonic_num: number of harmonic overtones (default 0)
289
+ sine_amp: amplitude of sine-wavefrom (default 0.1)
290
+ noise_std: std of Gaussian noise (default 0.003)
291
+ voiced_thoreshold: F0 threshold for U/V classification (default 0)
292
+ flag_for_pulse: this SinGen is used inside PulseGen (default False)
293
+ Note: when flag_for_pulse is True, the first time step of a voiced
294
+ segment is always sin(np.pi) or cos(0)
295
+ """
296
+
297
+ def __init__(
298
+ self,
299
+ samp_rate,
300
+ harmonic_num=0,
301
+ sine_amp=0.1,
302
+ noise_std=0.003,
303
+ voiced_threshold=0,
304
+ flag_for_pulse=False,
305
+ ):
306
+ super(SineGen, self).__init__()
307
+ self.sine_amp = sine_amp
308
+ self.noise_std = noise_std
309
+ self.harmonic_num = harmonic_num
310
+ self.dim = self.harmonic_num + 1
311
+ self.sampling_rate = samp_rate
312
+ self.voiced_threshold = voiced_threshold
313
+
314
+ def _f02uv(self, f0):
315
+ # generate uv signal
316
+ uv = torch.ones_like(f0)
317
+ uv = uv * (f0 > self.voiced_threshold)
318
+ return uv
319
+
320
+ def forward(self, f0, upp):
321
+ """sine_tensor, uv = forward(f0)
322
+ input F0: tensor(batchsize=1, length, dim=1)
323
+ f0 for unvoiced steps should be 0
324
+ output sine_tensor: tensor(batchsize=1, length, dim)
325
+ output uv: tensor(batchsize=1, length, 1)
326
+ """
327
+ with torch.no_grad():
328
+ f0 = f0[:, None].transpose(1, 2)
329
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
330
+ # fundamental component
331
+ f0_buf[:, :, 0] = f0[:, :, 0]
332
+ for idx in np.arange(self.harmonic_num):
333
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
334
+ idx + 2
335
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
336
+ rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化
337
+ rand_ini = torch.rand(
338
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
339
+ )
340
+ rand_ini[:, 0] = 0
341
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
342
+ tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化
343
+ tmp_over_one *= upp
344
+ tmp_over_one = F.interpolate(
345
+ tmp_over_one.transpose(2, 1),
346
+ scale_factor=upp,
347
+ mode="linear",
348
+ align_corners=True,
349
+ ).transpose(2, 1)
350
+ rad_values = F.interpolate(
351
+ rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
352
+ ).transpose(
353
+ 2, 1
354
+ ) #######
355
+ tmp_over_one %= 1
356
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
357
+ cumsum_shift = torch.zeros_like(rad_values)
358
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
359
+ sine_waves = torch.sin(
360
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
361
+ )
362
+ sine_waves = sine_waves * self.sine_amp
363
+ uv = self._f02uv(f0)
364
+ uv = F.interpolate(
365
+ uv.transpose(2, 1), scale_factor=upp, mode="nearest"
366
+ ).transpose(2, 1)
367
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
368
+ noise = noise_amp * torch.randn_like(sine_waves)
369
+ sine_waves = sine_waves * uv + noise
370
+ return sine_waves, uv, noise
371
+
372
+
373
+ class SourceModuleHnNSF(torch.nn.Module):
374
+ """SourceModule for hn-nsf
375
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
376
+ add_noise_std=0.003, voiced_threshod=0)
377
+ sampling_rate: sampling_rate in Hz
378
+ harmonic_num: number of harmonic above F0 (default: 0)
379
+ sine_amp: amplitude of sine source signal (default: 0.1)
380
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
381
+ note that amplitude of noise in unvoiced is decided
382
+ by sine_amp
383
+ voiced_threshold: threhold to set U/V given F0 (default: 0)
384
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
385
+ F0_sampled (batchsize, length, 1)
386
+ Sine_source (batchsize, length, 1)
387
+ noise_source (batchsize, length 1)
388
+ uv (batchsize, length, 1)
389
+ """
390
+
391
+ def __init__(
392
+ self,
393
+ sampling_rate,
394
+ harmonic_num=0,
395
+ sine_amp=0.1,
396
+ add_noise_std=0.003,
397
+ voiced_threshod=0,
398
+ is_half=True,
399
+ ):
400
+ super(SourceModuleHnNSF, self).__init__()
401
+
402
+ self.sine_amp = sine_amp
403
+ self.noise_std = add_noise_std
404
+ self.is_half = is_half
405
+ # to produce sine waveforms
406
+ self.l_sin_gen = SineGen(
407
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
408
+ )
409
+
410
+ # to merge source harmonics into a single excitation
411
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
412
+ self.l_tanh = torch.nn.Tanh()
413
+
414
+ def forward(self, x, upp=None):
415
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
416
+ if self.is_half:
417
+ sine_wavs = sine_wavs.half()
418
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
419
+ return sine_merge, None, None # noise, uv
420
+
421
+
422
+ class GeneratorNSF(torch.nn.Module):
423
+ def __init__(
424
+ self,
425
+ initial_channel,
426
+ resblock,
427
+ resblock_kernel_sizes,
428
+ resblock_dilation_sizes,
429
+ upsample_rates,
430
+ upsample_initial_channel,
431
+ upsample_kernel_sizes,
432
+ gin_channels,
433
+ sr,
434
+ is_half=False,
435
+ ):
436
+ super(GeneratorNSF, self).__init__()
437
+ self.num_kernels = len(resblock_kernel_sizes)
438
+ self.num_upsamples = len(upsample_rates)
439
+
440
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
441
+ self.m_source = SourceModuleHnNSF(
442
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
443
+ )
444
+ self.noise_convs = nn.ModuleList()
445
+ self.conv_pre = Conv1d(
446
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
447
+ )
448
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
449
+
450
+ self.ups = nn.ModuleList()
451
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
452
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
453
+ self.ups.append(
454
+ weight_norm(
455
+ ConvTranspose1d(
456
+ upsample_initial_channel // (2**i),
457
+ upsample_initial_channel // (2 ** (i + 1)),
458
+ k,
459
+ u,
460
+ padding=(k - u) // 2,
461
+ )
462
+ )
463
+ )
464
+ if i + 1 < len(upsample_rates):
465
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
466
+ self.noise_convs.append(
467
+ Conv1d(
468
+ 1,
469
+ c_cur,
470
+ kernel_size=stride_f0 * 2,
471
+ stride=stride_f0,
472
+ padding=stride_f0 // 2,
473
+ )
474
+ )
475
+ else:
476
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
477
+
478
+ self.resblocks = nn.ModuleList()
479
+ for i in range(len(self.ups)):
480
+ ch = upsample_initial_channel // (2 ** (i + 1))
481
+ for j, (k, d) in enumerate(
482
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
483
+ ):
484
+ self.resblocks.append(resblock(ch, k, d))
485
+
486
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
487
+ self.ups.apply(init_weights)
488
+
489
+ if gin_channels != 0:
490
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
491
+
492
+ self.upp = np.prod(upsample_rates)
493
+
494
+ def forward(self, x, f0, g=None):
495
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
496
+ har_source = har_source.transpose(1, 2)
497
+ x = self.conv_pre(x)
498
+ if g is not None:
499
+ x = x + self.cond(g)
500
+
501
+ for i in range(self.num_upsamples):
502
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
503
+ x = self.ups[i](x)
504
+ x_source = self.noise_convs[i](har_source)
505
+ x = x + x_source
506
+ xs = None
507
+ for j in range(self.num_kernels):
508
+ if xs is None:
509
+ xs = self.resblocks[i * self.num_kernels + j](x)
510
+ else:
511
+ xs += self.resblocks[i * self.num_kernels + j](x)
512
+ x = xs / self.num_kernels
513
+ x = F.leaky_relu(x)
514
+ x = self.conv_post(x)
515
+ x = torch.tanh(x)
516
+ return x
517
+
518
+ def remove_weight_norm(self):
519
+ for l in self.ups:
520
+ remove_weight_norm(l)
521
+ for l in self.resblocks:
522
+ l.remove_weight_norm()
523
+
524
+
525
+ sr2sr = {
526
+ "32k": 32000,
527
+ "40k": 40000,
528
+ "48k": 48000,
529
+ }
530
+
531
+
532
+ class SynthesizerTrnMsNSFsidM(nn.Module):
533
+ def __init__(
534
+ self,
535
+ spec_channels,
536
+ segment_size,
537
+ inter_channels,
538
+ hidden_channels,
539
+ filter_channels,
540
+ n_heads,
541
+ n_layers,
542
+ kernel_size,
543
+ p_dropout,
544
+ resblock,
545
+ resblock_kernel_sizes,
546
+ resblock_dilation_sizes,
547
+ upsample_rates,
548
+ upsample_initial_channel,
549
+ upsample_kernel_sizes,
550
+ spk_embed_dim,
551
+ gin_channels,
552
+ sr,
553
+ version,
554
+ **kwargs
555
+ ):
556
+ super().__init__()
557
+ if type(sr) == type("strr"):
558
+ sr = sr2sr[sr]
559
+ self.spec_channels = spec_channels
560
+ self.inter_channels = inter_channels
561
+ self.hidden_channels = hidden_channels
562
+ self.filter_channels = filter_channels
563
+ self.n_heads = n_heads
564
+ self.n_layers = n_layers
565
+ self.kernel_size = kernel_size
566
+ self.p_dropout = p_dropout
567
+ self.resblock = resblock
568
+ self.resblock_kernel_sizes = resblock_kernel_sizes
569
+ self.resblock_dilation_sizes = resblock_dilation_sizes
570
+ self.upsample_rates = upsample_rates
571
+ self.upsample_initial_channel = upsample_initial_channel
572
+ self.upsample_kernel_sizes = upsample_kernel_sizes
573
+ self.segment_size = segment_size
574
+ self.gin_channels = gin_channels
575
+ # self.hop_length = hop_length#
576
+ self.spk_embed_dim = spk_embed_dim
577
+ if version == "v1":
578
+ self.enc_p = TextEncoder256(
579
+ inter_channels,
580
+ hidden_channels,
581
+ filter_channels,
582
+ n_heads,
583
+ n_layers,
584
+ kernel_size,
585
+ p_dropout,
586
+ )
587
+ else:
588
+ self.enc_p = TextEncoder768(
589
+ inter_channels,
590
+ hidden_channels,
591
+ filter_channels,
592
+ n_heads,
593
+ n_layers,
594
+ kernel_size,
595
+ p_dropout,
596
+ )
597
+ self.dec = GeneratorNSF(
598
+ inter_channels,
599
+ resblock,
600
+ resblock_kernel_sizes,
601
+ resblock_dilation_sizes,
602
+ upsample_rates,
603
+ upsample_initial_channel,
604
+ upsample_kernel_sizes,
605
+ gin_channels=gin_channels,
606
+ sr=sr,
607
+ is_half=kwargs["is_half"],
608
+ )
609
+ self.enc_q = PosteriorEncoder(
610
+ spec_channels,
611
+ inter_channels,
612
+ hidden_channels,
613
+ 5,
614
+ 1,
615
+ 16,
616
+ gin_channels=gin_channels,
617
+ )
618
+ self.flow = ResidualCouplingBlock(
619
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
620
+ )
621
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
622
+ self.speaker_map = None
623
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
624
+
625
+ def remove_weight_norm(self):
626
+ self.dec.remove_weight_norm()
627
+ self.flow.remove_weight_norm()
628
+ self.enc_q.remove_weight_norm()
629
+
630
+ def construct_spkmixmap(self, n_speaker):
631
+ self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
632
+ for i in range(n_speaker):
633
+ self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
634
+ self.speaker_map = self.speaker_map.unsqueeze(0)
635
+
636
+ def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
637
+ if self.speaker_map is not None: # [N, S] * [S, B, 1, H]
638
+ g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]
639
+ g = g * self.speaker_map # [N, S, B, 1, H]
640
+ g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
641
+ g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
642
+ else:
643
+ g = g.unsqueeze(0)
644
+ g = self.emb_g(g).transpose(1, 2)
645
+
646
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
647
+ z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
648
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
649
+ o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
650
+ return o
651
+
652
+
653
+ class MultiPeriodDiscriminator(torch.nn.Module):
654
+ def __init__(self, use_spectral_norm=False):
655
+ super(MultiPeriodDiscriminator, self).__init__()
656
+ periods = [2, 3, 5, 7, 11, 17]
657
+ # periods = [3, 5, 7, 11, 17, 23, 37]
658
+
659
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
660
+ discs = discs + [
661
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
662
+ ]
663
+ self.discriminators = nn.ModuleList(discs)
664
+
665
+ def forward(self, y, y_hat):
666
+ y_d_rs = [] #
667
+ y_d_gs = []
668
+ fmap_rs = []
669
+ fmap_gs = []
670
+ for i, d in enumerate(self.discriminators):
671
+ y_d_r, fmap_r = d(y)
672
+ y_d_g, fmap_g = d(y_hat)
673
+ # for j in range(len(fmap_r)):
674
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
675
+ y_d_rs.append(y_d_r)
676
+ y_d_gs.append(y_d_g)
677
+ fmap_rs.append(fmap_r)
678
+ fmap_gs.append(fmap_g)
679
+
680
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
681
+
682
+
683
+ class MultiPeriodDiscriminatorV2(torch.nn.Module):
684
+ def __init__(self, use_spectral_norm=False):
685
+ super(MultiPeriodDiscriminatorV2, self).__init__()
686
+ # periods = [2, 3, 5, 7, 11, 17]
687
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
688
+
689
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
690
+ discs = discs + [
691
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
692
+ ]
693
+ self.discriminators = nn.ModuleList(discs)
694
+
695
+ def forward(self, y, y_hat):
696
+ y_d_rs = [] #
697
+ y_d_gs = []
698
+ fmap_rs = []
699
+ fmap_gs = []
700
+ for i, d in enumerate(self.discriminators):
701
+ y_d_r, fmap_r = d(y)
702
+ y_d_g, fmap_g = d(y_hat)
703
+ # for j in range(len(fmap_r)):
704
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
705
+ y_d_rs.append(y_d_r)
706
+ y_d_gs.append(y_d_g)
707
+ fmap_rs.append(fmap_r)
708
+ fmap_gs.append(fmap_g)
709
+
710
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
711
+
712
+
713
+ class DiscriminatorS(torch.nn.Module):
714
+ def __init__(self, use_spectral_norm=False):
715
+ super(DiscriminatorS, self).__init__()
716
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
717
+ self.convs = nn.ModuleList(
718
+ [
719
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
720
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
721
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
722
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
723
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
724
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
725
+ ]
726
+ )
727
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
728
+
729
+ def forward(self, x):
730
+ fmap = []
731
+
732
+ for l in self.convs:
733
+ x = l(x)
734
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
735
+ fmap.append(x)
736
+ x = self.conv_post(x)
737
+ fmap.append(x)
738
+ x = torch.flatten(x, 1, -1)
739
+
740
+ return x, fmap
741
+
742
+
743
+ class DiscriminatorP(torch.nn.Module):
744
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
745
+ super(DiscriminatorP, self).__init__()
746
+ self.period = period
747
+ self.use_spectral_norm = use_spectral_norm
748
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
749
+ self.convs = nn.ModuleList(
750
+ [
751
+ norm_f(
752
+ Conv2d(
753
+ 1,
754
+ 32,
755
+ (kernel_size, 1),
756
+ (stride, 1),
757
+ padding=(get_padding(kernel_size, 1), 0),
758
+ )
759
+ ),
760
+ norm_f(
761
+ Conv2d(
762
+ 32,
763
+ 128,
764
+ (kernel_size, 1),
765
+ (stride, 1),
766
+ padding=(get_padding(kernel_size, 1), 0),
767
+ )
768
+ ),
769
+ norm_f(
770
+ Conv2d(
771
+ 128,
772
+ 512,
773
+ (kernel_size, 1),
774
+ (stride, 1),
775
+ padding=(get_padding(kernel_size, 1), 0),
776
+ )
777
+ ),
778
+ norm_f(
779
+ Conv2d(
780
+ 512,
781
+ 1024,
782
+ (kernel_size, 1),
783
+ (stride, 1),
784
+ padding=(get_padding(kernel_size, 1), 0),
785
+ )
786
+ ),
787
+ norm_f(
788
+ Conv2d(
789
+ 1024,
790
+ 1024,
791
+ (kernel_size, 1),
792
+ 1,
793
+ padding=(get_padding(kernel_size, 1), 0),
794
+ )
795
+ ),
796
+ ]
797
+ )
798
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
799
+
800
+ def forward(self, x):
801
+ fmap = []
802
+
803
+ # 1d to 2d
804
+ b, c, t = x.shape
805
+ if t % self.period != 0: # pad first
806
+ n_pad = self.period - (t % self.period)
807
+ x = F.pad(x, (0, n_pad), "reflect")
808
+ t = t + n_pad
809
+ x = x.view(b, c, t // self.period, self.period)
810
+
811
+ for l in self.convs:
812
+ x = l(x)
813
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
814
+ fmap.append(x)
815
+ x = self.conv_post(x)
816
+ fmap.append(x)
817
+ x = torch.flatten(x, 1, -1)
818
+
819
+ return x, fmap
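
SynthesizerTrnMsNSFsidM above is the export-oriented synthesizer: its forward takes the prior noise rnd as an explicit input so the traced graph is deterministic. A minimal export sketch, assuming an RVC v2 checkpoint whose "config" list matches this constructor; the checkpoint path, feature sizes, and inter_channels=192 are assumptions, not guaranteed by this repo:

import torch
from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM

cpt = torch.load("model.pth", map_location="cpu")  # hypothetical checkpoint
net_g = SynthesizerTrnMsNSFsidM(*cpt["config"], version="v2", is_half=False)
net_g.load_state_dict(cpt["weight"], strict=False)
net_g.eval()

n_frames = 200
phone = torch.rand(1, n_frames, 768)          # HuBERT features (768-dim for v2)
phone_lengths = torch.LongTensor([n_frames])
pitch = torch.randint(1, 255, (1, n_frames))  # coarse pitch codes (1..255)
nsff0 = torch.rand(1, n_frames) * 400         # F0 in Hz for the NSF source
ds = torch.LongTensor([0])                    # speaker id
rnd = torch.rand(1, 192, n_frames)            # prior noise; 192 = inter_channels in stock configs

torch.onnx.export(
    net_g,
    (phone, phone_lengths, pitch, nsff0, ds, rnd),
    "rvc_model.onnx",
    input_names=["phone", "phone_lengths", "pitch", "nsff0", "ds", "rnd"],
    output_names=["audio"],
    dynamic_axes={"phone": [1], "pitch": [1], "nsff0": [1], "rnd": [2]},
    opset_version=13,
)

The six input names deliberately mirror what OnnxRVC.forward in onnx_inference.py feeds the ONNX session.
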
Bocchi-the-Rock/lib/infer_pack/modules.py ADDED
@@ -0,0 +1,522 @@
+import copy
+import math
+import numpy as np
+import scipy
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm
+
+from lib.infer_pack import commons
+from lib.infer_pack.commons import init_weights, get_padding
+from lib.infer_pack.transforms import piecewise_rational_quadratic_transform
+
+
+LRELU_SLOPE = 0.1
+
+
+class LayerNorm(nn.Module):
+    def __init__(self, channels, eps=1e-5):
+        super().__init__()
+        self.channels = channels
+        self.eps = eps
+
+        self.gamma = nn.Parameter(torch.ones(channels))
+        self.beta = nn.Parameter(torch.zeros(channels))
+
+    def forward(self, x):
+        x = x.transpose(1, -1)
+        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+        return x.transpose(1, -1)
+
+
+class ConvReluNorm(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        hidden_channels,
+        out_channels,
+        kernel_size,
+        n_layers,
+        p_dropout,
+    ):
+        super().__init__()
+        self.in_channels = in_channels
+        self.hidden_channels = hidden_channels
+        self.out_channels = out_channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+        assert n_layers > 1, "Number of layers should be larger than 1."
+
+        self.conv_layers = nn.ModuleList()
+        self.norm_layers = nn.ModuleList()
+        self.conv_layers.append(
+            nn.Conv1d(
+                in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
+            )
+        )
+        self.norm_layers.append(LayerNorm(hidden_channels))
+        self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
+        for _ in range(n_layers - 1):
+            self.conv_layers.append(
+                nn.Conv1d(
+                    hidden_channels,
+                    hidden_channels,
+                    kernel_size,
+                    padding=kernel_size // 2,
+                )
+            )
+            self.norm_layers.append(LayerNorm(hidden_channels))
+        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+        self.proj.weight.data.zero_()
+        self.proj.bias.data.zero_()
+
+    def forward(self, x, x_mask):
+        x_org = x
+        for i in range(self.n_layers):
+            x = self.conv_layers[i](x * x_mask)
+            x = self.norm_layers[i](x)
+            x = self.relu_drop(x)
+        x = x_org + self.proj(x)
+        return x * x_mask
+
+
+class DDSConv(nn.Module):
+    """
+    Dilated and Depth-Separable Convolution
+    """
+
+    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
+        super().__init__()
+        self.channels = channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+
+        self.drop = nn.Dropout(p_dropout)
+        self.convs_sep = nn.ModuleList()
+        self.convs_1x1 = nn.ModuleList()
+        self.norms_1 = nn.ModuleList()
+        self.norms_2 = nn.ModuleList()
+        for i in range(n_layers):
+            dilation = kernel_size**i
+            padding = (kernel_size * dilation - dilation) // 2
+            self.convs_sep.append(
+                nn.Conv1d(
+                    channels,
+                    channels,
+                    kernel_size,
+                    groups=channels,
+                    dilation=dilation,
+                    padding=padding,
+                )
+            )
+            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+            self.norms_1.append(LayerNorm(channels))
+            self.norms_2.append(LayerNorm(channels))
+
+    def forward(self, x, x_mask, g=None):
+        if g is not None:
+            x = x + g
+        for i in range(self.n_layers):
+            y = self.convs_sep[i](x * x_mask)
+            y = self.norms_1[i](y)
+            y = F.gelu(y)
+            y = self.convs_1x1[i](y)
+            y = self.norms_2[i](y)
+            y = F.gelu(y)
+            y = self.drop(y)
+            x = x + y
+        return x * x_mask
+
+
+class WN(torch.nn.Module):
+    def __init__(
+        self,
+        hidden_channels,
+        kernel_size,
+        dilation_rate,
+        n_layers,
+        gin_channels=0,
+        p_dropout=0,
+    ):
+        super(WN, self).__init__()
+        assert kernel_size % 2 == 1
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.gin_channels = gin_channels
+        self.p_dropout = p_dropout
+
+        self.in_layers = torch.nn.ModuleList()
+        self.res_skip_layers = torch.nn.ModuleList()
+        self.drop = nn.Dropout(p_dropout)
+
+        if gin_channels != 0:
+            cond_layer = torch.nn.Conv1d(
+                gin_channels, 2 * hidden_channels * n_layers, 1
+            )
+            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
+
+        for i in range(n_layers):
+            dilation = dilation_rate**i
+            padding = int((kernel_size * dilation - dilation) / 2)
+            in_layer = torch.nn.Conv1d(
+                hidden_channels,
+                2 * hidden_channels,
+                kernel_size,
+                dilation=dilation,
+                padding=padding,
+            )
+            in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
+            self.in_layers.append(in_layer)
+
+            # last one is not necessary
+            if i < n_layers - 1:
+                res_skip_channels = 2 * hidden_channels
+            else:
+                res_skip_channels = hidden_channels
+
+            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
+            self.res_skip_layers.append(res_skip_layer)
+
+    def forward(self, x, x_mask, g=None, **kwargs):
+        output = torch.zeros_like(x)
+        n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+        if g is not None:
+            g = self.cond_layer(g)
+
+        for i in range(self.n_layers):
+            x_in = self.in_layers[i](x)
+            if g is not None:
+                cond_offset = i * 2 * self.hidden_channels
+                g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
+            else:
+                g_l = torch.zeros_like(x_in)
+
+            acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
+            acts = self.drop(acts)
+
+            res_skip_acts = self.res_skip_layers[i](acts)
+            if i < self.n_layers - 1:
+                res_acts = res_skip_acts[:, : self.hidden_channels, :]
+                x = (x + res_acts) * x_mask
+                output = output + res_skip_acts[:, self.hidden_channels :, :]
+            else:
+                output = output + res_skip_acts
+        return output * x_mask
+
+    def remove_weight_norm(self):
+        if self.gin_channels != 0:
+            torch.nn.utils.remove_weight_norm(self.cond_layer)
+        for l in self.in_layers:
+            torch.nn.utils.remove_weight_norm(l)
+        for l in self.res_skip_layers:
+            torch.nn.utils.remove_weight_norm(l)
+
+
+class ResBlock1(torch.nn.Module):
+    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+        super(ResBlock1, self).__init__()
+        self.convs1 = nn.ModuleList(
+            [
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=dilation[0],
+                        padding=get_padding(kernel_size, dilation[0]),
+                    )
+                ),
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=dilation[1],
+                        padding=get_padding(kernel_size, dilation[1]),
+                    )
+                ),
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=dilation[2],
+                        padding=get_padding(kernel_size, dilation[2]),
+                    )
+                ),
+            ]
+        )
+        self.convs1.apply(init_weights)
+
+        self.convs2 = nn.ModuleList(
+            [
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=1,
+                        padding=get_padding(kernel_size, 1),
+                    )
+                ),
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=1,
+                        padding=get_padding(kernel_size, 1),
+                    )
+                ),
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=1,
+                        padding=get_padding(kernel_size, 1),
+                    )
+                ),
+            ]
+        )
+        self.convs2.apply(init_weights)
+
+    def forward(self, x, x_mask=None):
+        for c1, c2 in zip(self.convs1, self.convs2):
+            xt = F.leaky_relu(x, LRELU_SLOPE)
+            if x_mask is not None:
+                xt = xt * x_mask
+            xt = c1(xt)
+            xt = F.leaky_relu(xt, LRELU_SLOPE)
+            if x_mask is not None:
+                xt = xt * x_mask
+            xt = c2(xt)
+            x = xt + x
+        if x_mask is not None:
+            x = x * x_mask
+        return x
+
+    def remove_weight_norm(self):
+        for l in self.convs1:
+            remove_weight_norm(l)
+        for l in self.convs2:
+            remove_weight_norm(l)
+
+
+class ResBlock2(torch.nn.Module):
+    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+        super(ResBlock2, self).__init__()
+        self.convs = nn.ModuleList(
+            [
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=dilation[0],
+                        padding=get_padding(kernel_size, dilation[0]),
+                    )
+                ),
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=dilation[1],
+                        padding=get_padding(kernel_size, dilation[1]),
+                    )
+                ),
+            ]
+        )
+        self.convs.apply(init_weights)
+
+    def forward(self, x, x_mask=None):
+        for c in self.convs:
+            xt = F.leaky_relu(x, LRELU_SLOPE)
+            if x_mask is not None:
+                xt = xt * x_mask
+            xt = c(xt)
+            x = xt + x
+        if x_mask is not None:
+            x = x * x_mask
+        return x
+
+    def remove_weight_norm(self):
+        for l in self.convs:
+            remove_weight_norm(l)
+
+
+class Log(nn.Module):
+    def forward(self, x, x_mask, reverse=False, **kwargs):
+        if not reverse:
+            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+            logdet = torch.sum(-y, [1, 2])
+            return y, logdet
+        else:
+            x = torch.exp(x) * x_mask
+            return x
+
+
+class Flip(nn.Module):
+    def forward(self, x, *args, reverse=False, **kwargs):
+        x = torch.flip(x, [1])
+        if not reverse:
+            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+            return x, logdet
+        else:
+            return x
+
+
+class ElementwiseAffine(nn.Module):
+    def __init__(self, channels):
+        super().__init__()
+        self.channels = channels
+        self.m = nn.Parameter(torch.zeros(channels, 1))
+        self.logs = nn.Parameter(torch.zeros(channels, 1))
+
+    def forward(self, x, x_mask, reverse=False, **kwargs):
+        if not reverse:
+            y = self.m + torch.exp(self.logs) * x
+            y = y * x_mask
+            logdet = torch.sum(self.logs * x_mask, [1, 2])
+            return y, logdet
+        else:
+            x = (x - self.m) * torch.exp(-self.logs) * x_mask
+            return x
+
+
+class ResidualCouplingLayer(nn.Module):
+    def __init__(
+        self,
+        channels,
+        hidden_channels,
+        kernel_size,
+        dilation_rate,
+        n_layers,
+        p_dropout=0,
+        gin_channels=0,
+        mean_only=False,
+    ):
+        assert channels % 2 == 0, "channels should be divisible by 2"
+        super().__init__()
+        self.channels = channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.half_channels = channels // 2
+        self.mean_only = mean_only
+
+        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+        self.enc = WN(
+            hidden_channels,
+            kernel_size,
+            dilation_rate,
+            n_layers,
+            p_dropout=p_dropout,
+            gin_channels=gin_channels,
+        )
+        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+        self.post.weight.data.zero_()
+        self.post.bias.data.zero_()
+
+    def forward(self, x, x_mask, g=None, reverse=False):
+        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+        h = self.pre(x0) * x_mask
+        h = self.enc(h, x_mask, g=g)
+        stats = self.post(h) * x_mask
+        if not self.mean_only:
+            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+        else:
+            m = stats
+            logs = torch.zeros_like(m)
+
+        if not reverse:
+            x1 = m + x1 * torch.exp(logs) * x_mask
+            x = torch.cat([x0, x1], 1)
+            logdet = torch.sum(logs, [1, 2])
+            return x, logdet
+        else:
+            x1 = (x1 - m) * torch.exp(-logs) * x_mask
+            x = torch.cat([x0, x1], 1)
+            return x
+
+    def remove_weight_norm(self):
+        self.enc.remove_weight_norm()
+
+
+class ConvFlow(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        filter_channels,
+        kernel_size,
+        n_layers,
+        num_bins=10,
+        tail_bound=5.0,
+    ):
+        super().__init__()
+        self.in_channels = in_channels
+        self.filter_channels = filter_channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.num_bins = num_bins
+        self.tail_bound = tail_bound
+        self.half_channels = in_channels // 2
+
+        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
+        self.proj = nn.Conv1d(
+            filter_channels, self.half_channels * (num_bins * 3 - 1), 1
+        )
+        self.proj.weight.data.zero_()
+        self.proj.bias.data.zero_()
+
+    def forward(self, x, x_mask, g=None, reverse=False):
+        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+        h = self.pre(x0)
+        h = self.convs(h, x_mask, g=g)
+        h = self.proj(h) * x_mask
+
+        b, c, t = x0.shape
+        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, cx?, t] -> [b, c, t, ?]
+
+        unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
+        unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
+            self.filter_channels
+        )
+        unnormalized_derivatives = h[..., 2 * self.num_bins :]
+
+        x1, logabsdet = piecewise_rational_quadratic_transform(
+            x1,
+            unnormalized_widths,
+            unnormalized_heights,
+            unnormalized_derivatives,
+            inverse=reverse,
+            tails="linear",
+            tail_bound=self.tail_bound,
+        )
+
+        x = torch.cat([x0, x1], 1) * x_mask
+        logdet = torch.sum(logabsdet * x_mask, [1, 2])
+        if not reverse:
+            return x, logdet
+        else:
+            return x
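
As a sanity check on the flow layers above, the residual coupling is exactly invertible when mean_only=True (logs is all zeros). A small round-trip sketch with illustrative sizes:

import torch
from lib.infer_pack.modules import ResidualCouplingLayer

layer = ResidualCouplingLayer(
    channels=192, hidden_channels=192, kernel_size=5,
    dilation_rate=1, n_layers=3, mean_only=True,
)
x = torch.randn(1, 192, 100)
x_mask = torch.ones(1, 1, 100)

y, logdet = layer(x, x_mask)            # forward direction
x_rec = layer(y, x_mask, reverse=True)  # inverse direction
print(torch.allclose(x, x_rec, atol=1e-5))  # True: x0 passes through, x1 is shifted back
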
Bocchi-the-Rock/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py ADDED
@@ -0,0 +1,90 @@
+from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+import pyworld
+import numpy as np
+
+
+class DioF0Predictor(F0Predictor):
+    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+        self.hop_length = hop_length
+        self.f0_min = f0_min
+        self.f0_max = f0_max
+        self.sampling_rate = sampling_rate
+
+    def interpolate_f0(self, f0):
+        """
+        Interpolate F0 across unvoiced frames.
+        """
+
+        data = np.reshape(f0, (f0.size, 1))
+
+        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+        vuv_vector[data > 0.0] = 1.0
+        vuv_vector[data <= 0.0] = 0.0
+
+        ip_data = data
+
+        frame_number = data.size
+        last_value = 0.0
+        for i in range(frame_number):
+            if data[i] <= 0.0:
+                j = i + 1
+                for j in range(i + 1, frame_number):
+                    if data[j] > 0.0:
+                        break
+                if j < frame_number - 1:
+                    if last_value > 0.0:
+                        step = (data[j] - data[i - 1]) / float(j - i)
+                        for k in range(i, j):
+                            ip_data[k] = data[i - 1] + step * (k - i + 1)
+                    else:
+                        for k in range(i, j):
+                            ip_data[k] = data[j]
+                else:
+                    for k in range(i, frame_number):
+                        ip_data[k] = last_value
+            else:
+                ip_data[i] = data[i]  # this copy may be unnecessary
+                last_value = data[i]
+
+        return ip_data[:, 0], vuv_vector[:, 0]
+
+    def resize_f0(self, x, target_len):
+        source = np.array(x)
+        source[source < 0.001] = np.nan
+        target = np.interp(
+            np.arange(0, len(source) * target_len, len(source)) / target_len,
+            np.arange(0, len(source)),
+            source,
+        )
+        res = np.nan_to_num(target)
+        return res
+
+    def compute_f0(self, wav, p_len=None):
+        if p_len is None:
+            p_len = wav.shape[0] // self.hop_length
+        f0, t = pyworld.dio(
+            wav.astype(np.double),
+            fs=self.sampling_rate,
+            f0_floor=self.f0_min,
+            f0_ceil=self.f0_max,
+            frame_period=1000 * self.hop_length / self.sampling_rate,
+        )
+        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+        for index, pitch in enumerate(f0):
+            f0[index] = round(pitch, 1)
+        return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+    def compute_f0_uv(self, wav, p_len=None):
+        if p_len is None:
+            p_len = wav.shape[0] // self.hop_length
+        f0, t = pyworld.dio(
+            wav.astype(np.double),
+            fs=self.sampling_rate,
+            f0_floor=self.f0_min,
+            f0_ceil=self.f0_max,
+            frame_period=1000 * self.hop_length / self.sampling_rate,
+        )
+        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+        for index, pitch in enumerate(f0):
+            f0[index] = round(pitch, 1)
+        return self.interpolate_f0(self.resize_f0(f0, p_len))
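
A usage sketch for the predictor above (the wav path and rate are placeholders):

import librosa
from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor

wav, sr = librosa.load("sample.wav", sr=44100)  # hypothetical input file
predictor = DioF0Predictor(hop_length=512, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)           # per-frame F0 in Hz plus voiced/unvoiced flags
print(f0.shape)                                 # ~ len(wav) // hop_length frames
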
Bocchi-the-Rock/lib/infer_pack/modules/F0Predictor/F0Predictor.py ADDED
@@ -0,0 +1,16 @@
+class F0Predictor(object):
+    def compute_f0(self, wav, p_len):
+        """
+        input: wav:[signal_length]
+               p_len:int
+        output: f0:[signal_length//hop_length]
+        """
+        pass
+
+    def compute_f0_uv(self, wav, p_len):
+        """
+        input: wav:[signal_length]
+               p_len:int
+        output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
+        """
+        pass
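
The base class above is a plain duck-typed interface; any object providing these two methods works. A toy implementation sketch (for testing plumbing only, not a real pitch tracker):

import numpy as np
from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor

class ConstantF0Predictor(F0Predictor):
    # Returns a flat 100 Hz contour marked fully voiced.
    def __init__(self, hop_length=512):
        self.hop_length = hop_length

    def compute_f0(self, wav, p_len=None):
        if p_len is None:
            p_len = wav.shape[0] // self.hop_length
        return np.full(p_len, 100.0)

    def compute_f0_uv(self, wav, p_len=None):
        f0 = self.compute_f0(wav, p_len)
        return f0, np.ones_like(f0)
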
Bocchi-the-Rock/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py ADDED
@@ -0,0 +1,86 @@
+from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+import pyworld
+import numpy as np
+
+
+class HarvestF0Predictor(F0Predictor):
+    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+        self.hop_length = hop_length
+        self.f0_min = f0_min
+        self.f0_max = f0_max
+        self.sampling_rate = sampling_rate
+
+    def interpolate_f0(self, f0):
+        """
+        Interpolate F0 across unvoiced frames.
+        """
+
+        data = np.reshape(f0, (f0.size, 1))
+
+        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+        vuv_vector[data > 0.0] = 1.0
+        vuv_vector[data <= 0.0] = 0.0
+
+        ip_data = data
+
+        frame_number = data.size
+        last_value = 0.0
+        for i in range(frame_number):
+            if data[i] <= 0.0:
+                j = i + 1
+                for j in range(i + 1, frame_number):
+                    if data[j] > 0.0:
+                        break
+                if j < frame_number - 1:
+                    if last_value > 0.0:
+                        step = (data[j] - data[i - 1]) / float(j - i)
+                        for k in range(i, j):
+                            ip_data[k] = data[i - 1] + step * (k - i + 1)
+                    else:
+                        for k in range(i, j):
+                            ip_data[k] = data[j]
+                else:
+                    for k in range(i, frame_number):
+                        ip_data[k] = last_value
+            else:
+                ip_data[i] = data[i]  # this copy may be unnecessary
+                last_value = data[i]
+
+        return ip_data[:, 0], vuv_vector[:, 0]
+
+    def resize_f0(self, x, target_len):
+        source = np.array(x)
+        source[source < 0.001] = np.nan
+        target = np.interp(
+            np.arange(0, len(source) * target_len, len(source)) / target_len,
+            np.arange(0, len(source)),
+            source,
+        )
+        res = np.nan_to_num(target)
+        return res
+
+    def compute_f0(self, wav, p_len=None):
+        if p_len is None:
+            p_len = wav.shape[0] // self.hop_length
+        f0, t = pyworld.harvest(
+            wav.astype(np.double),
+            fs=self.sampling_rate,
+            f0_ceil=self.f0_max,
+            f0_floor=self.f0_min,
+            frame_period=1000 * self.hop_length / self.sampling_rate,
+        )
+        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+        return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+    def compute_f0_uv(self, wav, p_len=None):
+        if p_len is None:
+            p_len = wav.shape[0] // self.hop_length
+        f0, t = pyworld.harvest(
+            wav.astype(np.double),
+            fs=self.sampling_rate,
+            f0_floor=self.f0_min,
+            f0_ceil=self.f0_max,
+            frame_period=1000 * self.hop_length / self.sampling_rate,
+        )
+        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+        return self.interpolate_f0(self.resize_f0(f0, p_len))
Bocchi-the-Rock/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py ADDED
@@ -0,0 +1,97 @@
+from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+import parselmouth
+import numpy as np
+
+
+class PMF0Predictor(F0Predictor):
+    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+        self.hop_length = hop_length
+        self.f0_min = f0_min
+        self.f0_max = f0_max
+        self.sampling_rate = sampling_rate
+
+    def interpolate_f0(self, f0):
+        """
+        Interpolate F0 across unvoiced frames.
+        """
+
+        data = np.reshape(f0, (f0.size, 1))
+
+        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+        vuv_vector[data > 0.0] = 1.0
+        vuv_vector[data <= 0.0] = 0.0
+
+        ip_data = data
+
+        frame_number = data.size
+        last_value = 0.0
+        for i in range(frame_number):
+            if data[i] <= 0.0:
+                j = i + 1
+                for j in range(i + 1, frame_number):
+                    if data[j] > 0.0:
+                        break
+                if j < frame_number - 1:
+                    if last_value > 0.0:
+                        step = (data[j] - data[i - 1]) / float(j - i)
+                        for k in range(i, j):
+                            ip_data[k] = data[i - 1] + step * (k - i + 1)
+                    else:
+                        for k in range(i, j):
+                            ip_data[k] = data[j]
+                else:
+                    for k in range(i, frame_number):
+                        ip_data[k] = last_value
+            else:
+                ip_data[i] = data[i]  # this copy may be unnecessary
+                last_value = data[i]
+
+        return ip_data[:, 0], vuv_vector[:, 0]
+
+    def compute_f0(self, wav, p_len=None):
+        x = wav
+        if p_len is None:
+            p_len = x.shape[0] // self.hop_length
+        else:
+            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+        time_step = self.hop_length / self.sampling_rate * 1000
+        f0 = (
+            parselmouth.Sound(x, self.sampling_rate)
+            .to_pitch_ac(
+                time_step=time_step / 1000,
+                voicing_threshold=0.6,
+                pitch_floor=self.f0_min,
+                pitch_ceiling=self.f0_max,
+            )
+            .selected_array["frequency"]
+        )
+
+        pad_size = (p_len - len(f0) + 1) // 2
+        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+        f0, uv = self.interpolate_f0(f0)
+        return f0
+
+    def compute_f0_uv(self, wav, p_len=None):
+        x = wav
+        if p_len is None:
+            p_len = x.shape[0] // self.hop_length
+        else:
+            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+        time_step = self.hop_length / self.sampling_rate * 1000
+        f0 = (
+            parselmouth.Sound(x, self.sampling_rate)
+            .to_pitch_ac(
+                time_step=time_step / 1000,
+                voicing_threshold=0.6,
+                pitch_floor=self.f0_min,
+                pitch_ceiling=self.f0_max,
+            )
+            .selected_array["frequency"]
+        )
+
+        pad_size = (p_len - len(f0) + 1) // 2
+        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+        f0, uv = self.interpolate_f0(f0)
+        return f0, uv
Bocchi-the-Rock/lib/infer_pack/modules/F0Predictor/__init__.py ADDED
File without changes
Bocchi-the-Rock/lib/infer_pack/onnx_inference.py ADDED
@@ -0,0 +1,145 @@
+import onnxruntime
+import librosa
+import numpy as np
+import soundfile
+
+
+class ContentVec:
+    def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
+        print("load model(s) from {}".format(vec_path))
+        if device == "cpu" or device is None:
+            providers = ["CPUExecutionProvider"]
+        elif device == "cuda":
+            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+        elif device == "dml":
+            providers = ["DmlExecutionProvider"]
+        else:
+            raise RuntimeError("Unsupported device")
+        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
+
+    def __call__(self, wav):
+        return self.forward(wav)
+
+    def forward(self, wav):
+        feats = wav
+        if feats.ndim == 2:  # stereo -> mono
+            feats = feats.mean(-1)
+        assert feats.ndim == 1, feats.ndim
+        feats = np.expand_dims(np.expand_dims(feats, 0), 0)
+        onnx_input = {self.model.get_inputs()[0].name: feats}
+        logits = self.model.run(None, onnx_input)[0]
+        return logits.transpose(0, 2, 1)
+
+
+def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
+    if f0_predictor == "pm":
+        from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
+
+        f0_predictor_object = PMF0Predictor(
+            hop_length=hop_length, sampling_rate=sampling_rate
+        )
+    elif f0_predictor == "harvest":
+        from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
+            HarvestF0Predictor,
+        )
+
+        f0_predictor_object = HarvestF0Predictor(
+            hop_length=hop_length, sampling_rate=sampling_rate
+        )
+    elif f0_predictor == "dio":
+        from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
+
+        f0_predictor_object = DioF0Predictor(
+            hop_length=hop_length, sampling_rate=sampling_rate
+        )
+    else:
+        raise Exception("Unknown f0 predictor")
+    return f0_predictor_object
+
+
+class OnnxRVC:
+    def __init__(
+        self,
+        model_path,
+        sr=40000,
+        hop_size=512,
+        vec_path="vec-768-layer-12",
+        device="cpu",
+    ):
+        vec_path = f"pretrained/{vec_path}.onnx"
+        self.vec_model = ContentVec(vec_path, device)
+        if device == "cpu" or device is None:
+            providers = ["CPUExecutionProvider"]
+        elif device == "cuda":
+            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+        elif device == "dml":
+            providers = ["DmlExecutionProvider"]
+        else:
+            raise RuntimeError("Unsupported device")
+        self.model = onnxruntime.InferenceSession(model_path, providers=providers)
+        self.sampling_rate = sr
+        self.hop_size = hop_size
+
+    def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
+        onnx_input = {
+            self.model.get_inputs()[0].name: hubert,
+            self.model.get_inputs()[1].name: hubert_length,
+            self.model.get_inputs()[2].name: pitch,
+            self.model.get_inputs()[3].name: pitchf,
+            self.model.get_inputs()[4].name: ds,
+            self.model.get_inputs()[5].name: rnd,
+        }
+        return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
+
+    def inference(
+        self,
+        raw_path,
+        sid,
+        f0_method="dio",
+        f0_up_key=0,
+        pad_time=0.5,
+        cr_threshold=0.02,
+    ):
+        f0_min = 50
+        f0_max = 1100
+        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+        f0_predictor = get_f0_predictor(
+            f0_method,
+            hop_length=self.hop_size,
+            sampling_rate=self.sampling_rate,
+            threshold=cr_threshold,
+        )
+        wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
+        org_length = len(wav)
+        if org_length / sr > 50.0:
+            raise RuntimeError("Reached Max Length")
+
+        wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
+
+        hubert = self.vec_model(wav16k)
+        hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
+        hubert_length = hubert.shape[1]
+
+        pitchf = f0_predictor.compute_f0(wav, hubert_length)
+        pitchf = pitchf * 2 ** (f0_up_key / 12)
+        pitch = pitchf.copy()
+        f0_mel = 1127 * np.log(1 + pitch / 700)
+        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+            f0_mel_max - f0_mel_min
+        ) + 1
+        f0_mel[f0_mel <= 1] = 1
+        f0_mel[f0_mel > 255] = 255
+        pitch = np.rint(f0_mel).astype(np.int64)
+
+        pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
+        pitch = pitch.reshape(1, len(pitch))
+        ds = np.array([sid]).astype(np.int64)
+
+        rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
+        hubert_length = np.array([hubert_length]).astype(np.int64)
+
+        out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
+        out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
+        return out_wav[0:org_length]
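
An end-to-end usage sketch (model and file names are placeholders; the ContentVec ONNX model is expected at pretrained/vec-768-layer-12.onnx and is not shipped here):

import soundfile
from lib.infer_pack.onnx_inference import OnnxRVC

model = OnnxRVC(
    "rvc_model.onnx",  # hypothetical exported synthesizer
    sr=40000,
    hop_size=512,
    vec_path="vec-768-layer-12",
    device="cpu",
)
out = model.inference("input.wav", sid=0, f0_method="dio", f0_up_key=2)  # shift up 2 semitones
soundfile.write("output.wav", out, 40000)
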
Bocchi-the-Rock/lib/infer_pack/transforms.py ADDED
@@ -0,0 +1,209 @@
+import torch
+from torch.nn import functional as F
+
+import numpy as np
+
+
+DEFAULT_MIN_BIN_WIDTH = 1e-3
+DEFAULT_MIN_BIN_HEIGHT = 1e-3
+DEFAULT_MIN_DERIVATIVE = 1e-3
+
+
+def piecewise_rational_quadratic_transform(
+    inputs,
+    unnormalized_widths,
+    unnormalized_heights,
+    unnormalized_derivatives,
+    inverse=False,
+    tails=None,
+    tail_bound=1.0,
+    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+    min_derivative=DEFAULT_MIN_DERIVATIVE,
+):
+    if tails is None:
+        spline_fn = rational_quadratic_spline
+        spline_kwargs = {}
+    else:
+        spline_fn = unconstrained_rational_quadratic_spline
+        spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
+
+    outputs, logabsdet = spline_fn(
+        inputs=inputs,
+        unnormalized_widths=unnormalized_widths,
+        unnormalized_heights=unnormalized_heights,
+        unnormalized_derivatives=unnormalized_derivatives,
+        inverse=inverse,
+        min_bin_width=min_bin_width,
+        min_bin_height=min_bin_height,
+        min_derivative=min_derivative,
+        **spline_kwargs
+    )
+    return outputs, logabsdet
+
+
+def searchsorted(bin_locations, inputs, eps=1e-6):
+    bin_locations[..., -1] += eps
+    return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
+
+
+def unconstrained_rational_quadratic_spline(
+    inputs,
+    unnormalized_widths,
+    unnormalized_heights,
+    unnormalized_derivatives,
+    inverse=False,
+    tails="linear",
+    tail_bound=1.0,
+    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+    min_derivative=DEFAULT_MIN_DERIVATIVE,
+):
+    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
+    outside_interval_mask = ~inside_interval_mask
+
+    outputs = torch.zeros_like(inputs)
+    logabsdet = torch.zeros_like(inputs)
+
+    if tails == "linear":
+        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
+        constant = np.log(np.exp(1 - min_derivative) - 1)
+        unnormalized_derivatives[..., 0] = constant
+        unnormalized_derivatives[..., -1] = constant
+
+        outputs[outside_interval_mask] = inputs[outside_interval_mask]
+        logabsdet[outside_interval_mask] = 0
+    else:
+        raise RuntimeError("{} tails are not implemented.".format(tails))
+
+    (
+        outputs[inside_interval_mask],
+        logabsdet[inside_interval_mask],
+    ) = rational_quadratic_spline(
+        inputs=inputs[inside_interval_mask],
+        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
+        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
+        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
+        inverse=inverse,
+        left=-tail_bound,
+        right=tail_bound,
+        bottom=-tail_bound,
+        top=tail_bound,
+        min_bin_width=min_bin_width,
+        min_bin_height=min_bin_height,
+        min_derivative=min_derivative,
+    )
+
+    return outputs, logabsdet
+
+
+def rational_quadratic_spline(
+    inputs,
+    unnormalized_widths,
+    unnormalized_heights,
+    unnormalized_derivatives,
+    inverse=False,
+    left=0.0,
+    right=1.0,
+    bottom=0.0,
+    top=1.0,
+    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+    min_derivative=DEFAULT_MIN_DERIVATIVE,
+):
+    if torch.min(inputs) < left or torch.max(inputs) > right:
+        raise ValueError("Input to a transform is not within its domain")
+
+    num_bins = unnormalized_widths.shape[-1]
+
+    if min_bin_width * num_bins > 1.0:
+        raise ValueError("Minimal bin width too large for the number of bins")
+    if min_bin_height * num_bins > 1.0:
+        raise ValueError("Minimal bin height too large for the number of bins")
+
+    widths = F.softmax(unnormalized_widths, dim=-1)
+    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
+    cumwidths = torch.cumsum(widths, dim=-1)
+    cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
+    cumwidths = (right - left) * cumwidths + left
+    cumwidths[..., 0] = left
+    cumwidths[..., -1] = right
+    widths = cumwidths[..., 1:] - cumwidths[..., :-1]
+
+    derivatives = min_derivative + F.softplus(unnormalized_derivatives)
+
+    heights = F.softmax(unnormalized_heights, dim=-1)
+    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
+    cumheights = torch.cumsum(heights, dim=-1)
+    cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
+    cumheights = (top - bottom) * cumheights + bottom
+    cumheights[..., 0] = bottom
+    cumheights[..., -1] = top
+    heights = cumheights[..., 1:] - cumheights[..., :-1]
+
+    if inverse:
+        bin_idx = searchsorted(cumheights, inputs)[..., None]
+    else:
+        bin_idx = searchsorted(cumwidths, inputs)[..., None]
+
+    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
+    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
+
+    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
+    delta = heights / widths
+    input_delta = delta.gather(-1, bin_idx)[..., 0]
+
+    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
+    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
+
+    input_heights = heights.gather(-1, bin_idx)[..., 0]
+
+    if inverse:
+        a = (inputs - input_cumheights) * (
+            input_derivatives + input_derivatives_plus_one - 2 * input_delta
+        ) + input_heights * (input_delta - input_derivatives)
+        b = input_heights * input_derivatives - (inputs - input_cumheights) * (
166
+ input_derivatives + input_derivatives_plus_one - 2 * input_delta
167
+ )
168
+ c = -input_delta * (inputs - input_cumheights)
169
+
170
+ discriminant = b.pow(2) - 4 * a * c
171
+ assert (discriminant >= 0).all()
172
+
173
+ root = (2 * c) / (-b - torch.sqrt(discriminant))
174
+ outputs = root * input_bin_widths + input_cumwidths
175
+
176
+ theta_one_minus_theta = root * (1 - root)
177
+ denominator = input_delta + (
178
+ (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
179
+ * theta_one_minus_theta
180
+ )
181
+ derivative_numerator = input_delta.pow(2) * (
182
+ input_derivatives_plus_one * root.pow(2)
183
+ + 2 * input_delta * theta_one_minus_theta
184
+ + input_derivatives * (1 - root).pow(2)
185
+ )
186
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
187
+
188
+ return outputs, -logabsdet
189
+ else:
190
+ theta = (inputs - input_cumwidths) / input_bin_widths
191
+ theta_one_minus_theta = theta * (1 - theta)
192
+
193
+ numerator = input_heights * (
194
+ input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
195
+ )
196
+ denominator = input_delta + (
197
+ (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
198
+ * theta_one_minus_theta
199
+ )
200
+ outputs = input_cumheights + numerator / denominator
201
+
202
+ derivative_numerator = input_delta.pow(2) * (
203
+ input_derivatives_plus_one * theta.pow(2)
204
+ + 2 * input_delta * theta_one_minus_theta
205
+ + input_derivatives * (1 - theta).pow(2)
206
+ )
207
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
208
+
209
+ return outputs, logabsdet
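transforms.py is the standard piecewise rational-quadratic spline from Neural Spline Flows (Durkan et al., 2019), used inside the VITS-style flow layers of models.py. A quick self-check sketch of the forward/inverse round trip (the import path and tensor shapes are assumptions based on this repo layout; with tails="linear" the derivatives tensor has num_bins - 1 entries, padded internally to num_bins + 1 knots):

import torch
from lib.infer_pack import transforms  # assumes running from the Bocchi-the-Rock directory

torch.manual_seed(0)
x = torch.rand(4, 10) * 2 - 1   # inputs inside the [-1, 1] tail bound
w = torch.randn(4, 10, 8)       # unnormalized widths (8 bins)
h = torch.randn(4, 10, 8)       # unnormalized heights
d = torch.randn(4, 10, 7)       # unnormalized inner-knot derivatives

y, logdet = transforms.piecewise_rational_quadratic_transform(
    x, w, h, d, inverse=False, tails="linear", tail_bound=1.0
)
x_back, inv_logdet = transforms.piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails="linear", tail_bound=1.0
)
print(torch.allclose(x, x_back, atol=1e-4))           # True: the spline is invertible
print(torch.allclose(logdet, -inv_logdet, atol=1e-4)) # inverse log-det is the negation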
Bocchi-the-Rock/requirements.txt ADDED
@@ -0,0 +1,24 @@
1
+ wheel
2
+ setuptools
3
+ ffmpeg
4
+ torch
5
+ numba==0.56.4
6
+ numpy==1.23.5
7
+ scipy==1.9.3
8
+ librosa==0.9.1
9
+ fairseq==0.12.2
10
+ faiss-cpu==1.7.3
11
+ gradio==3.50.2
12
+ pyworld>=0.3.2
13
+ soundfile>=0.12.1
14
+ praat-parselmouth>=0.4.2
15
+ huggingface_hub>=0.20.0
16
+ httpx
17
+ tensorboard
18
+ tensorboardX
19
+ torchcrepe
20
+ onnxruntime
21
+ demucs
22
+ edge-tts
23
+ yt_dlp
24
+ python-dotenv
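The tight pins here are deliberate: fairseq 0.12.2 (which loads hubert_base.pt) is known to break with NumPy >= 1.24 and with newer librosa APIs, so numpy==1.23.5 and librosa==0.9.1 should be treated as a matched set. A quick sanity check that the resolved versions match the pins (a sketch; run it inside the Space's environment):

import importlib.metadata as md

for pkg in ("numpy", "librosa", "fairseq", "gradio"):
    print(pkg, md.version(pkg))  # raises PackageNotFoundError if a pin failed to install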
Bocchi-the-Rock/rmvpe.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5ed4719f59085d1affc5d81354c70828c740584f2d24e782523345a6a278962
3
+ size 181189687
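rmvpe.pt is stored as a Git LFS pointer, so the three lines above are all that lives in the repo; the ~181 MB weight file is fetched by LFS on checkout. A tiny hypothetical helper (not part of this Space) for reading such a pointer, only meaningful while the file has not yet been smudged by LFS:

def parse_lfs_pointer(text):
    # A pointer file is "key value" lines: version, oid (sha256:<hex>), size (bytes).
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

meta = parse_lfs_pointer(open("Bocchi-the-Rock/rmvpe.pt").read())
print(meta["oid"], int(meta["size"]))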
Bocchi-the-Rock/rmvpe.py ADDED
@@ -0,0 +1,432 @@
1
+ import sys, torch, numpy as np, traceback, pdb
2
+ import torch.nn as nn
3
+ from time import time as ttime
4
+ import torch.nn.functional as F
5
+
6
+
7
+ class BiGRU(nn.Module):
8
+ def __init__(self, input_features, hidden_features, num_layers):
9
+ super(BiGRU, self).__init__()
10
+ self.gru = nn.GRU(
11
+ input_features,
12
+ hidden_features,
13
+ num_layers=num_layers,
14
+ batch_first=True,
15
+ bidirectional=True,
16
+ )
17
+
18
+ def forward(self, x):
19
+ return self.gru(x)[0]
20
+
21
+
22
+ class ConvBlockRes(nn.Module):
23
+ def __init__(self, in_channels, out_channels, momentum=0.01):
24
+ super(ConvBlockRes, self).__init__()
25
+ self.conv = nn.Sequential(
26
+ nn.Conv2d(
27
+ in_channels=in_channels,
28
+ out_channels=out_channels,
29
+ kernel_size=(3, 3),
30
+ stride=(1, 1),
31
+ padding=(1, 1),
32
+ bias=False,
33
+ ),
34
+ nn.BatchNorm2d(out_channels, momentum=momentum),
35
+ nn.ReLU(),
36
+ nn.Conv2d(
37
+ in_channels=out_channels,
38
+ out_channels=out_channels,
39
+ kernel_size=(3, 3),
40
+ stride=(1, 1),
41
+ padding=(1, 1),
42
+ bias=False,
43
+ ),
44
+ nn.BatchNorm2d(out_channels, momentum=momentum),
45
+ nn.ReLU(),
46
+ )
47
+ if in_channels != out_channels:
48
+ self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
49
+ self.is_shortcut = True
50
+ else:
51
+ self.is_shortcut = False
52
+
53
+ def forward(self, x):
54
+ if self.is_shortcut:
55
+ return self.conv(x) + self.shortcut(x)
56
+ else:
57
+ return self.conv(x) + x
58
+
59
+
60
+ class Encoder(nn.Module):
61
+ def __init__(
62
+ self,
63
+ in_channels,
64
+ in_size,
65
+ n_encoders,
66
+ kernel_size,
67
+ n_blocks,
68
+ out_channels=16,
69
+ momentum=0.01,
70
+ ):
71
+ super(Encoder, self).__init__()
72
+ self.n_encoders = n_encoders
73
+ self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
74
+ self.layers = nn.ModuleList()
75
+ self.latent_channels = []
76
+ for i in range(self.n_encoders):
77
+ self.layers.append(
78
+ ResEncoderBlock(
79
+ in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
80
+ )
81
+ )
82
+ self.latent_channels.append([out_channels, in_size])
83
+ in_channels = out_channels
84
+ out_channels *= 2
85
+ in_size //= 2
86
+ self.out_size = in_size
87
+ self.out_channel = out_channels
88
+
89
+ def forward(self, x):
90
+ concat_tensors = []
91
+ x = self.bn(x)
92
+ for i in range(self.n_encoders):
93
+ _, x = self.layers[i](x)
94
+ concat_tensors.append(_)
95
+ return x, concat_tensors
96
+
97
+
98
+ class ResEncoderBlock(nn.Module):
99
+ def __init__(
100
+ self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
101
+ ):
102
+ super(ResEncoderBlock, self).__init__()
103
+ self.n_blocks = n_blocks
104
+ self.conv = nn.ModuleList()
105
+ self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
106
+ for i in range(n_blocks - 1):
107
+ self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
108
+ self.kernel_size = kernel_size
109
+ if self.kernel_size is not None:
110
+ self.pool = nn.AvgPool2d(kernel_size=kernel_size)
111
+
112
+ def forward(self, x):
113
+ for i in range(self.n_blocks):
114
+ x = self.conv[i](x)
115
+ if self.kernel_size is not None:
116
+ return x, self.pool(x)
117
+ else:
118
+ return x
119
+
120
+
121
+ class Intermediate(nn.Module):
122
+ def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
123
+ super(Intermediate, self).__init__()
124
+ self.n_inters = n_inters
125
+ self.layers = nn.ModuleList()
126
+ self.layers.append(
127
+ ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
128
+ )
129
+ for i in range(self.n_inters - 1):
130
+ self.layers.append(
131
+ ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
132
+ )
133
+
134
+ def forward(self, x):
135
+ for i in range(self.n_inters):
136
+ x = self.layers[i](x)
137
+ return x
138
+
139
+
140
+ class ResDecoderBlock(nn.Module):
141
+ def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
142
+ super(ResDecoderBlock, self).__init__()
143
+ out_padding = (0, 1) if stride == (1, 2) else (1, 1)
144
+ self.n_blocks = n_blocks
145
+ self.conv1 = nn.Sequential(
146
+ nn.ConvTranspose2d(
147
+ in_channels=in_channels,
148
+ out_channels=out_channels,
149
+ kernel_size=(3, 3),
150
+ stride=stride,
151
+ padding=(1, 1),
152
+ output_padding=out_padding,
153
+ bias=False,
154
+ ),
155
+ nn.BatchNorm2d(out_channels, momentum=momentum),
156
+ nn.ReLU(),
157
+ )
158
+ self.conv2 = nn.ModuleList()
159
+ self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
160
+ for i in range(n_blocks - 1):
161
+ self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
162
+
163
+ def forward(self, x, concat_tensor):
164
+ x = self.conv1(x)
165
+ x = torch.cat((x, concat_tensor), dim=1)
166
+ for i in range(self.n_blocks):
167
+ x = self.conv2[i](x)
168
+ return x
169
+
170
+
171
+ class Decoder(nn.Module):
172
+ def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
173
+ super(Decoder, self).__init__()
174
+ self.layers = nn.ModuleList()
175
+ self.n_decoders = n_decoders
176
+ for i in range(self.n_decoders):
177
+ out_channels = in_channels // 2
178
+ self.layers.append(
179
+ ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
180
+ )
181
+ in_channels = out_channels
182
+
183
+ def forward(self, x, concat_tensors):
184
+ for i in range(self.n_decoders):
185
+ x = self.layers[i](x, concat_tensors[-1 - i])
186
+ return x
187
+
188
+
189
+ class DeepUnet(nn.Module):
190
+ def __init__(
191
+ self,
192
+ kernel_size,
193
+ n_blocks,
194
+ en_de_layers=5,
195
+ inter_layers=4,
196
+ in_channels=1,
197
+ en_out_channels=16,
198
+ ):
199
+ super(DeepUnet, self).__init__()
200
+ self.encoder = Encoder(
201
+ in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
202
+ )
203
+ self.intermediate = Intermediate(
204
+ self.encoder.out_channel // 2,
205
+ self.encoder.out_channel,
206
+ inter_layers,
207
+ n_blocks,
208
+ )
209
+ self.decoder = Decoder(
210
+ self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
211
+ )
212
+
213
+ def forward(self, x):
214
+ x, concat_tensors = self.encoder(x)
215
+ x = self.intermediate(x)
216
+ x = self.decoder(x, concat_tensors)
217
+ return x
218
+
219
+
220
+ class E2E(nn.Module):
221
+ def __init__(
222
+ self,
223
+ n_blocks,
224
+ n_gru,
225
+ kernel_size,
226
+ en_de_layers=5,
227
+ inter_layers=4,
228
+ in_channels=1,
229
+ en_out_channels=16,
230
+ ):
231
+ super(E2E, self).__init__()
232
+ self.unet = DeepUnet(
233
+ kernel_size,
234
+ n_blocks,
235
+ en_de_layers,
236
+ inter_layers,
237
+ in_channels,
238
+ en_out_channels,
239
+ )
240
+ self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
241
+ if n_gru:
242
+ self.fc = nn.Sequential(
243
+ BiGRU(3 * 128, 256, n_gru),
244
+ nn.Linear(512, 360),
245
+ nn.Dropout(0.25),
246
+ nn.Sigmoid(),
247
+ )
248
+ else:
249
+ self.fc = nn.Sequential(
250
+ nn.Linear(3 * 128, 360), nn.Dropout(0.25), nn.Sigmoid()  # N_MELS=128, N_CLASS=360 (the names were undefined in this file; values match the rest of the model)
251
+ )
252
+
253
+ def forward(self, mel):
254
+ mel = mel.transpose(-1, -2).unsqueeze(1)
255
+ x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
256
+ x = self.fc(x)
257
+ return x
258
+
259
+
260
+ from librosa.filters import mel
261
+
262
+
263
+ class MelSpectrogram(torch.nn.Module):
264
+ def __init__(
265
+ self,
266
+ is_half,
267
+ n_mel_channels,
268
+ sampling_rate,
269
+ win_length,
270
+ hop_length,
271
+ n_fft=None,
272
+ mel_fmin=0,
273
+ mel_fmax=None,
274
+ clamp=1e-5,
275
+ ):
276
+ super().__init__()
277
+ n_fft = win_length if n_fft is None else n_fft
278
+ self.hann_window = {}
279
+ mel_basis = mel(
280
+ sr=sampling_rate,
281
+ n_fft=n_fft,
282
+ n_mels=n_mel_channels,
283
+ fmin=mel_fmin,
284
+ fmax=mel_fmax,
285
+ htk=True,
286
+ )
287
+ mel_basis = torch.from_numpy(mel_basis).float()
288
+ self.register_buffer("mel_basis", mel_basis)
289
+ self.n_fft = win_length if n_fft is None else n_fft
290
+ self.hop_length = hop_length
291
+ self.win_length = win_length
292
+ self.sampling_rate = sampling_rate
293
+ self.n_mel_channels = n_mel_channels
294
+ self.clamp = clamp
295
+ self.is_half = is_half
296
+
297
+ def forward(self, audio, keyshift=0, speed=1, center=True):
298
+ factor = 2 ** (keyshift / 12)
299
+ n_fft_new = int(np.round(self.n_fft * factor))
300
+ win_length_new = int(np.round(self.win_length * factor))
301
+ hop_length_new = int(np.round(self.hop_length * speed))
302
+ keyshift_key = str(keyshift) + "_" + str(audio.device)
303
+ if keyshift_key not in self.hann_window:
304
+ self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
305
+ audio.device
306
+ )
307
+ fft = torch.stft(
308
+ audio,
309
+ n_fft=n_fft_new,
310
+ hop_length=hop_length_new,
311
+ win_length=win_length_new,
312
+ window=self.hann_window[keyshift_key],
313
+ center=center,
314
+ return_complex=True,
315
+ )
316
+ magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
317
+ if keyshift != 0:
318
+ size = self.n_fft // 2 + 1
319
+ resize = magnitude.size(1)
320
+ if resize < size:
321
+ magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
322
+ magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
323
+ mel_output = torch.matmul(self.mel_basis, magnitude)
324
+ if self.is_half == True:
325
+ mel_output = mel_output.half()
326
+ log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
327
+ return log_mel_spec
328
+
329
+
330
+ class RMVPE:
331
+ def __init__(self, model_path, is_half, device=None):
332
+ self.resample_kernel = {}
333
+ model = E2E(4, 1, (2, 2))
334
+ ckpt = torch.load(model_path, map_location="cpu")
335
+ model.load_state_dict(ckpt)
336
+ model.eval()
337
+ if is_half == True:
338
+ model = model.half()
339
+ self.model = model
340
+ self.resample_kernel = {}
341
+ self.is_half = is_half
342
+ if device is None:
343
+ device = "cuda" if torch.cuda.is_available() else "cpu"
344
+ self.device = device
345
+ self.mel_extractor = MelSpectrogram(
346
+ is_half, 128, 16000, 1024, 160, None, 30, 8000
347
+ ).to(device)
348
+ self.model = self.model.to(device)
349
+ cents_mapping = 20 * np.arange(360) + 1997.3794084376191
350
+ self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368
351
+
352
+ def mel2hidden(self, mel):
353
+ with torch.no_grad():
354
+ n_frames = mel.shape[-1]
355
+ mel = F.pad(
356
+ mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect"
357
+ )
358
+ hidden = self.model(mel)
359
+ return hidden[:, :n_frames]
360
+
361
+ def decode(self, hidden, thred=0.03):
362
+ cents_pred = self.to_local_average_cents(hidden, thred=thred)
363
+ f0 = 10 * (2 ** (cents_pred / 1200))
364
+ f0[f0 == 10] = 0
365
+ # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
366
+ return f0
367
+
368
+ def infer_from_audio(self, audio, thred=0.03):
369
+ audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
370
+ # torch.cuda.synchronize()
371
+ # t0=ttime()
372
+ mel = self.mel_extractor(audio, center=True)
373
+ # torch.cuda.synchronize()
374
+ # t1=ttime()
375
+ hidden = self.mel2hidden(mel)
376
+ # torch.cuda.synchronize()
377
+ # t2=ttime()
378
+ hidden = hidden.squeeze(0).cpu().numpy()
379
+ if self.is_half == True:
380
+ hidden = hidden.astype("float32")
381
+ f0 = self.decode(hidden, thred=thred)
382
+ # torch.cuda.synchronize()
383
+ # t3=ttime()
384
+ # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0))
385
+ return f0
386
+
387
+ def to_local_average_cents(self, salience, thred=0.05):
388
+ # t0 = ttime()
389
+ center = np.argmax(salience, axis=1)  # (n_frames,) index of the peak bin per frame
390
+ salience = np.pad(salience, ((0, 0), (4, 4)))  # (n_frames, 368)
391
+ # t1 = ttime()
392
+ center += 4
393
+ todo_salience = []
394
+ todo_cents_mapping = []
395
+ starts = center - 4
396
+ ends = center + 5
397
+ for idx in range(salience.shape[0]):
398
+ todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
399
+ todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
400
+ # t2 = ttime()
401
+ todo_salience = np.array(todo_salience)  # (n_frames, 9)
402
+ todo_cents_mapping = np.array(todo_cents_mapping)  # (n_frames, 9)
403
+ product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
404
+ weight_sum = np.sum(todo_salience, 1)  # (n_frames,)
405
+ divided = product_sum / weight_sum  # (n_frames,) salience-weighted average in cents
406
+ # t3 = ttime()
407
+ maxx = np.max(salience, axis=1)  # (n_frames,) peak salience per frame
408
+ divided[maxx <= thred] = 0
409
+ # t4 = ttime()
410
+ # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
411
+ return divided
412
+
413
+
414
+ # if __name__ == '__main__':
415
+ # audio, sampling_rate = sf.read("卢本伟语录~1.wav")
416
+ # if len(audio.shape) > 1:
417
+ # audio = librosa.to_mono(audio.transpose(1, 0))
418
+ # audio_bak = audio.copy()
419
+ # if sampling_rate != 16000:
420
+ # audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
421
+ # model_path = "/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/test-RMVPE/weights/rmvpe_llc_half.pt"
422
+ # thred = 0.03 # 0.01
423
+ # device = 'cuda' if torch.cuda.is_available() else 'cpu'
424
+ # rmvpe = RMVPE(model_path,is_half=False, device=device)
425
+ # t0=ttime()
426
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
427
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
428
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
429
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
430
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
431
+ # t1=ttime()
432
+ # print(f0.shape,t1-t0)
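The commented-out __main__ block above shows the intended standalone usage. A cleaned-up sketch of the same flow (the file name and device are placeholders; rmvpe.pt is assumed to sit in the working directory, as it does in this Space):

import librosa
from rmvpe import RMVPE

audio, _ = librosa.load("input.wav", sr=16000, mono=True)  # RMVPE expects 16 kHz mono
rmvpe = RMVPE("rmvpe.pt", is_half=False, device="cpu")
f0 = rmvpe.infer_from_audio(audio, thred=0.03)  # one value per 10 ms hop; 0 = unvoiced
print(f0.shape, f0[f0 > 0][:5])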
Bocchi-the-Rock/vc_infer_pipeline.py ADDED
@@ -0,0 +1,443 @@
1
+ import numpy as np, parselmouth, torch, pdb, sys, os
2
+ from time import time as ttime
3
+ import torch.nn.functional as F
4
+ import scipy.signal as signal
5
+ import pyworld, os, traceback, faiss, librosa, torchcrepe
6
+ from scipy import signal
7
+ from functools import lru_cache
8
+
9
+ now_dir = os.getcwd()
10
+ sys.path.append(now_dir)
11
+
12
+ bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
13
+
14
+ input_audio_path2wav = {}
15
+
16
+
17
+ @lru_cache
18
+ def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
19
+ audio = input_audio_path2wav[input_audio_path]
20
+ f0, t = pyworld.harvest(
21
+ audio,
22
+ fs=fs,
23
+ f0_ceil=f0max,
24
+ f0_floor=f0min,
25
+ frame_period=frame_period,
26
+ )
27
+ f0 = pyworld.stonemask(audio, f0, t, fs)
28
+ return f0
29
+
30
+
31
+ def change_rms(data1, sr1, data2, sr2, rate):  # data1 is the input audio, data2 the output audio; rate is data2's weight
32
+ # print(data1.max(),data2.max())
33
+ rms1 = librosa.feature.rms(
34
+ y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
35
+ )  # one RMS point every half second
36
+ rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
37
+ rms1 = torch.from_numpy(rms1)
38
+ rms1 = F.interpolate(
39
+ rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
40
+ ).squeeze()
41
+ rms2 = torch.from_numpy(rms2)
42
+ rms2 = F.interpolate(
43
+ rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
44
+ ).squeeze()
45
+ rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
46
+ data2 *= (
47
+ torch.pow(rms1, torch.tensor(1 - rate))
48
+ * torch.pow(rms2, torch.tensor(rate - 1))
49
+ ).numpy()
50
+ return data2
51
+
52
+
53
+ class VC(object):
54
+ def __init__(self, tgt_sr, config):
55
+ self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
56
+ config.x_pad,
57
+ config.x_query,
58
+ config.x_center,
59
+ config.x_max,
60
+ config.is_half,
61
+ )
62
+ self.sr = 16000  # HuBERT input sampling rate
63
+ self.window = 160  # samples per frame
64
+ self.t_pad = self.sr * self.x_pad  # padding (in samples) before and after each chunk
65
+ self.t_pad_tgt = tgt_sr * self.x_pad
66
+ self.t_pad2 = self.t_pad * 2
67
+ self.t_query = self.sr * self.x_query  # search radius on each side of a candidate cut point
68
+ self.t_center = self.sr * self.x_center  # spacing between candidate cut points
69
+ self.t_max = self.sr * self.x_max  # audio shorter than this is processed without cutting
70
+ self.device = config.device
71
+
72
+ def get_f0(
73
+ self,
74
+ input_audio_path,
75
+ x,
76
+ p_len,
77
+ f0_up_key,
78
+ f0_method,
79
+ filter_radius,
80
+ inp_f0=None,
81
+ ):
82
+ global input_audio_path2wav
83
+ time_step = self.window / self.sr * 1000
84
+ f0_min = 50
85
+ f0_max = 1100
86
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
87
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
88
+ if f0_method == "pm":
89
+ f0 = (
90
+ parselmouth.Sound(x, self.sr)
91
+ .to_pitch_ac(
92
+ time_step=time_step / 1000,
93
+ voicing_threshold=0.6,
94
+ pitch_floor=f0_min,
95
+ pitch_ceiling=f0_max,
96
+ )
97
+ .selected_array["frequency"]
98
+ )
99
+ pad_size = (p_len - len(f0) + 1) // 2
100
+ if pad_size > 0 or p_len - len(f0) - pad_size > 0:
101
+ f0 = np.pad(
102
+ f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
103
+ )
104
+ elif f0_method == "harvest":
105
+ input_audio_path2wav[input_audio_path] = x.astype(np.double)
106
+ f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
107
+ if filter_radius > 2:
108
+ f0 = signal.medfilt(f0, 3)
109
+ elif f0_method == "crepe":
110
+ model = "full"
111
+ # Pick a batch size that doesn't cause memory errors on your gpu
112
+ batch_size = 512
113
+ # Compute pitch on the configured device
114
+ audio = torch.tensor(np.copy(x))[None].float()
115
+ f0, pd = torchcrepe.predict(
116
+ audio,
117
+ self.sr,
118
+ self.window,
119
+ f0_min,
120
+ f0_max,
121
+ model,
122
+ batch_size=batch_size,
123
+ device=self.device,
124
+ return_periodicity=True,
125
+ )
126
+ pd = torchcrepe.filter.median(pd, 3)
127
+ f0 = torchcrepe.filter.mean(f0, 3)
128
+ f0[pd < 0.1] = 0
129
+ f0 = f0[0].cpu().numpy()
130
+ elif f0_method == "rmvpe":
131
+ if hasattr(self, "model_rmvpe") == False:
132
+ from rmvpe import RMVPE
133
+
134
+ print("loading rmvpe model")
135
+ self.model_rmvpe = RMVPE(
136
+ "rmvpe.pt", is_half=self.is_half, device=self.device
137
+ )
138
+ f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
139
+ f0 *= pow(2, f0_up_key / 12)
140
+ # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
141
+ tf0 = self.sr // self.window  # f0 points per second
142
+ if inp_f0 is not None:
143
+ delta_t = np.round(
144
+ (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
145
+ ).astype("int16")
146
+ replace_f0 = np.interp(
147
+ list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
148
+ )
149
+ shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
150
+ f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
151
+ :shape
152
+ ]
153
+ # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
154
+ f0bak = f0.copy()
155
+ f0_mel = 1127 * np.log(1 + f0 / 700)
156
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
157
+ f0_mel_max - f0_mel_min
158
+ ) + 1
159
+ f0_mel[f0_mel <= 1] = 1
160
+ f0_mel[f0_mel > 255] = 255
161
+ f0_coarse = np.rint(f0_mel).astype(np.int64)  # np.int is deprecated and removed in NumPy >= 1.24
162
+ return f0_coarse, f0bak # 1-0
163
+
164
+ def vc(
165
+ self,
166
+ model,
167
+ net_g,
168
+ sid,
169
+ audio0,
170
+ pitch,
171
+ pitchf,
172
+ times,
173
+ index,
174
+ big_npy,
175
+ index_rate,
176
+ version,
177
+ protect,
178
+ ): # ,file_index,file_big_npy
179
+ feats = torch.from_numpy(audio0)
180
+ if self.is_half:
181
+ feats = feats.half()
182
+ else:
183
+ feats = feats.float()
184
+ if feats.dim() == 2: # double channels
185
+ feats = feats.mean(-1)
186
+ assert feats.dim() == 1, feats.dim()
187
+ feats = feats.view(1, -1)
188
+ padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
189
+
190
+ inputs = {
191
+ "source": feats.to(self.device),
192
+ "padding_mask": padding_mask,
193
+ "output_layer": 9 if version == "v1" else 12,
194
+ }
195
+ t0 = ttime()
196
+ with torch.no_grad():
197
+ logits = model.extract_features(**inputs)
198
+ feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
199
+ if protect < 0.5 and pitch != None and pitchf != None:
200
+ feats0 = feats.clone()
201
+ if (
202
+ isinstance(index, type(None)) == False
203
+ and isinstance(big_npy, type(None)) == False
204
+ and index_rate != 0
205
+ ):
206
+ npy = feats[0].cpu().numpy()
207
+ if self.is_half:
208
+ npy = npy.astype("float32")
209
+
210
+ # _, I = index.search(npy, 1)
211
+ # npy = big_npy[I.squeeze()]
212
+
213
+ score, ix = index.search(npy, k=8)
214
+ weight = np.square(1 / score)
215
+ weight /= weight.sum(axis=1, keepdims=True)
216
+ npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
217
+
218
+ if self.is_half:
219
+ npy = npy.astype("float16")
220
+ feats = (
221
+ torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
222
+ + (1 - index_rate) * feats
223
+ )
224
+
225
+ feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
226
+ if protect < 0.5 and pitch != None and pitchf != None:
227
+ feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
228
+ 0, 2, 1
229
+ )
230
+ t1 = ttime()
231
+ p_len = audio0.shape[0] // self.window
232
+ if feats.shape[1] < p_len:
233
+ p_len = feats.shape[1]
234
+ if pitch != None and pitchf != None:
235
+ pitch = pitch[:, :p_len]
236
+ pitchf = pitchf[:, :p_len]
237
+
238
+ if protect < 0.5 and pitch != None and pitchf != None:
239
+ pitchff = pitchf.clone()
240
+ pitchff[pitchf > 0] = 1
241
+ pitchff[pitchf < 1] = protect
242
+ pitchff = pitchff.unsqueeze(-1)
243
+ feats = feats * pitchff + feats0 * (1 - pitchff)
244
+ feats = feats.to(feats0.dtype)
245
+ p_len = torch.tensor([p_len], device=self.device).long()
246
+ with torch.no_grad():
247
+ if pitch != None and pitchf != None:
248
+ audio1 = (
249
+ (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
250
+ .data.cpu()
251
+ .float()
252
+ .numpy()
253
+ )
254
+ else:
255
+ audio1 = (
256
+ (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
257
+ )
258
+ del feats, p_len, padding_mask
259
+ if torch.cuda.is_available():
260
+ torch.cuda.empty_cache()
261
+ t2 = ttime()
262
+ times[0] += t1 - t0
263
+ times[2] += t2 - t1
264
+ return audio1
265
+
266
+ def pipeline(
267
+ self,
268
+ model,
269
+ net_g,
270
+ sid,
271
+ audio,
272
+ input_audio_path,
273
+ times,
274
+ f0_up_key,
275
+ f0_method,
276
+ file_index,
277
+ # file_big_npy,
278
+ index_rate,
279
+ if_f0,
280
+ filter_radius,
281
+ tgt_sr,
282
+ resample_sr,
283
+ rms_mix_rate,
284
+ version,
285
+ protect,
286
+ f0_file=None,
287
+ ):
288
+ if (
289
+ file_index != ""
290
+ # and file_big_npy != ""
291
+ # and os.path.exists(file_big_npy) == True
292
+ and os.path.exists(file_index) == True
293
+ and index_rate != 0
294
+ ):
295
+ try:
296
+ index = faiss.read_index(file_index)
297
+ # big_npy = np.load(file_big_npy)
298
+ big_npy = index.reconstruct_n(0, index.ntotal)
299
+ except:
300
+ traceback.print_exc()
301
+ index = big_npy = None
302
+ else:
303
+ index = big_npy = None
304
+ audio = signal.filtfilt(bh, ah, audio)
305
+ audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
306
+ opt_ts = []
307
+ if audio_pad.shape[0] > self.t_max:
308
+ audio_sum = np.zeros_like(audio)
309
+ for i in range(self.window):
310
+ audio_sum += audio_pad[i : i - self.window]
311
+ for t in range(self.t_center, audio.shape[0], self.t_center):
312
+ opt_ts.append(
313
+ t
314
+ - self.t_query
315
+ + np.where(
316
+ np.abs(audio_sum[t - self.t_query : t + self.t_query])
317
+ == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
318
+ )[0][0]
319
+ )
320
+ s = 0
321
+ audio_opt = []
322
+ t = None
323
+ t1 = ttime()
324
+ audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
325
+ p_len = audio_pad.shape[0] // self.window
326
+ inp_f0 = None
327
+ if hasattr(f0_file, "name") == True:
328
+ try:
329
+ with open(f0_file.name, "r") as f:
330
+ lines = f.read().strip("\n").split("\n")
331
+ inp_f0 = []
332
+ for line in lines:
333
+ inp_f0.append([float(i) for i in line.split(",")])
334
+ inp_f0 = np.array(inp_f0, dtype="float32")
335
+ except:
336
+ traceback.print_exc()
337
+ sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
338
+ pitch, pitchf = None, None
339
+ if if_f0 == 1:
340
+ pitch, pitchf = self.get_f0(
341
+ input_audio_path,
342
+ audio_pad,
343
+ p_len,
344
+ f0_up_key,
345
+ f0_method,
346
+ filter_radius,
347
+ inp_f0,
348
+ )
349
+ pitch = pitch[:p_len]
350
+ pitchf = pitchf[:p_len]
351
+ if self.device == "mps":
352
+ pitchf = pitchf.astype(np.float32)
353
+ pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
354
+ pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
355
+ t2 = ttime()
356
+ times[1] += t2 - t1
357
+ for t in opt_ts:
358
+ t = t // self.window * self.window
359
+ if if_f0 == 1:
360
+ audio_opt.append(
361
+ self.vc(
362
+ model,
363
+ net_g,
364
+ sid,
365
+ audio_pad[s : t + self.t_pad2 + self.window],
366
+ pitch[:, s // self.window : (t + self.t_pad2) // self.window],
367
+ pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
368
+ times,
369
+ index,
370
+ big_npy,
371
+ index_rate,
372
+ version,
373
+ protect,
374
+ )[self.t_pad_tgt : -self.t_pad_tgt]
375
+ )
376
+ else:
377
+ audio_opt.append(
378
+ self.vc(
379
+ model,
380
+ net_g,
381
+ sid,
382
+ audio_pad[s : t + self.t_pad2 + self.window],
383
+ None,
384
+ None,
385
+ times,
386
+ index,
387
+ big_npy,
388
+ index_rate,
389
+ version,
390
+ protect,
391
+ )[self.t_pad_tgt : -self.t_pad_tgt]
392
+ )
393
+ s = t
394
+ if if_f0 == 1:
395
+ audio_opt.append(
396
+ self.vc(
397
+ model,
398
+ net_g,
399
+ sid,
400
+ audio_pad[t:],
401
+ pitch[:, t // self.window :] if t is not None else pitch,
402
+ pitchf[:, t // self.window :] if t is not None else pitchf,
403
+ times,
404
+ index,
405
+ big_npy,
406
+ index_rate,
407
+ version,
408
+ protect,
409
+ )[self.t_pad_tgt : -self.t_pad_tgt]
410
+ )
411
+ else:
412
+ audio_opt.append(
413
+ self.vc(
414
+ model,
415
+ net_g,
416
+ sid,
417
+ audio_pad[t:],
418
+ None,
419
+ None,
420
+ times,
421
+ index,
422
+ big_npy,
423
+ index_rate,
424
+ version,
425
+ protect,
426
+ )[self.t_pad_tgt : -self.t_pad_tgt]
427
+ )
428
+ audio_opt = np.concatenate(audio_opt)
429
+ if rms_mix_rate != 1:
430
+ audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
431
+ if resample_sr >= 16000 and tgt_sr != resample_sr:
432
+ audio_opt = librosa.resample(
433
+ audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
434
+ )
435
+ audio_max = np.abs(audio_opt).max() / 0.99
436
+ max_int16 = 32768
437
+ if audio_max > 1:
438
+ max_int16 /= audio_max
439
+ audio_opt = (audio_opt * max_int16).astype(np.int16)
440
+ del pitch, pitchf, sid
441
+ if torch.cuda.is_available():
442
+ torch.cuda.empty_cache()
443
+ return audio_opt
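pipeline() ties everything together: optional FAISS index retrieval, a 48 Hz high-pass filter, silence-seeking cut points for long inputs, f0 extraction, chunked vc() calls, RMS envelope mixing, optional resampling, and int16 conversion. The change_rms helper is easy to exercise on its own; a sketch with noise as stand-in audio (in real use data1 is the source recording and data2 the converted output):

import numpy as np
from vc_infer_pipeline import change_rms

src = np.random.randn(16000).astype(np.float32)  # 1 s of "input" at 16 kHz
out = np.random.randn(40000).astype(np.float32)  # 1 s of "output" at 40 kHz
# rate=1 keeps the output's own loudness; rate=0 fully imposes the input's envelope.
mixed = change_rms(src, 16000, out, 40000, rate=0.25)
print(mixed.shape)  # (40000,)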