LH-Tech-AI committed on
Commit
e9fa195
·
0 Parent(s):

Duplicate from LH-Tech-AI/Flare-TTS-v1.5

Browse files
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ pipeline_tag: text-to-speech
5
+ tags:
6
+ - tts
7
+ - flare
8
+ - open
9
+ - open-source
10
+ - small
11
+ - speech
12
+ - text-to-speech
13
+ - tiny
14
+ - cpu
15
+ datasets:
16
+ - keithito/lj_speech
17
+ ---
18
+
19
+ # 🎙️ Flare-TTS v1.5 28M
20
+ Welcome to Flare-TTS **v1.5** 28M, an open-source text-to-speech model with 28 million parameters trained on LJSpeech.
21
+ <br>
22
+ This is an improved version of Flare-TTS 28M (v1), which now uses a vocoder to remove the robotic sounds!
23
+
24
+ ## Quality and results
25
+ This model now has much better quality: it no longer sounds robotic, and you can clearly understand what the model says.
26
+ <br>
27
+ Example:
28
+
29
+
30
+ <audio controls src="https://cdn-uploads.huggingface.co/production/uploads/697f2832c2c5e4daa93cece7/qohVONCxMMh-68Z2U7r46.wav"></audio>
31
+
32
+ ## Training process
33
+ We trained the vocoder for 72 epochs on a single A6000 GPU for ~10 hours. Note that this model is based on the first version, Flare-TTS 28M.
34
+ Furthermore, this model now uses a vocoder - see train_vocoder.py for more information and the full code.
35
+ The full training code for the vocoder can be found in this repo as `prepare.sh` and `train_vocoder.py`.
36
+ <br>
37
+ The full pretraining code is here: https://huggingface.co/LH-Tech-AI/Flare-TTS-28M/tree/main
38
+
39
+ ## Architecture
40
+ This model was trained using CoquiTTS. For the architecture we chose GlowTTS.
41
+
42
+ ## Training dataset
43
+ We trained on the full LJSpeech dataset. Thanks to keithito for this :-)
44
+
45
+ ## How to use
46
+ As soon as you have the model checkpoint (`model.pth`) and `config.json` on your device, you can generate a sample using:
47
+ ```bash
48
+ tts --text "Hello, world! This is the second version of Flare-TTS - now with a vocoder. The robot sounds are finally gone!" \
49
+ --model_path ./model.pth \
50
+ --config_path ./config.json \
51
+ --vocoder_path ./vocoder_15000_checkpoint.pth \
52
+ --vocoder_config_path ./vocoder_config.json \
53
+ --out_path output_1.wav
54
+ ```
55
+
56
+ ## Final thoughts
57
+ This model has much better audio quality than the first version of Flare-TTS 28M.
58
+ <br>
59
+ But stay tuned for a third version with more features! :D
config.json ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "output_path": "/home/ubuntu",
3
+ "logger_uri": null,
4
+ "run_name": "run",
5
+ "project_name": null,
6
+ "run_description": "\ud83d\udc38Coqui trainer run.",
7
+ "print_step": 25,
8
+ "plot_step": 100,
9
+ "model_param_stats": false,
10
+ "wandb_entity": null,
11
+ "dashboard_logger": "tensorboard",
12
+ "save_on_interrupt": true,
13
+ "log_model_step": null,
14
+ "save_step": 10000,
15
+ "save_n_checkpoints": 5,
16
+ "save_checkpoints": true,
17
+ "save_all_best": false,
18
+ "save_best_after": 0,
19
+ "target_loss": null,
20
+ "print_eval": false,
21
+ "test_delay_epochs": -1,
22
+ "run_eval": true,
23
+ "run_eval_steps": null,
24
+ "distributed_backend": "nccl",
25
+ "distributed_url": "tcp://localhost:54321",
26
+ "mixed_precision": true,
27
+ "precision": "fp16",
28
+ "epochs": 600,
29
+ "batch_size": 256,
30
+ "eval_batch_size": 128,
31
+ "grad_clip": 5.0,
32
+ "scheduler_after_epoch": true,
33
+ "lr": 0.001,
34
+ "optimizer": "RAdam",
35
+ "optimizer_params": {
36
+ "betas": [
37
+ 0.9,
38
+ 0.998
39
+ ],
40
+ "weight_decay": 1e-06
41
+ },
42
+ "lr_scheduler": "NoamLR",
43
+ "lr_scheduler_params": {
44
+ "warmup_steps": 4000
45
+ },
46
+ "use_grad_scaler": false,
47
+ "allow_tf32": false,
48
+ "cudnn_enable": true,
49
+ "cudnn_deterministic": false,
50
+ "cudnn_benchmark": false,
51
+ "training_seed": 54321,
52
+ "model": "glow_tts",
53
+ "num_loader_workers": 4,
54
+ "num_eval_loader_workers": 2,
55
+ "use_noise_augment": false,
56
+ "audio": {
57
+ "fft_size": 1024,
58
+ "win_length": 1024,
59
+ "hop_length": 256,
60
+ "frame_shift_ms": null,
61
+ "frame_length_ms": null,
62
+ "stft_pad_mode": "reflect",
63
+ "sample_rate": 22050,
64
+ "resample": false,
65
+ "preemphasis": 0.0,
66
+ "ref_level_db": 20,
67
+ "do_sound_norm": false,
68
+ "log_func": "np.log10",
69
+ "do_trim_silence": true,
70
+ "trim_db": 45,
71
+ "do_rms_norm": false,
72
+ "db_level": null,
73
+ "power": 1.5,
74
+ "griffin_lim_iters": 60,
75
+ "num_mels": 80,
76
+ "mel_fmin": 0.0,
77
+ "mel_fmax": null,
78
+ "spec_gain": 20,
79
+ "do_amp_to_db_linear": true,
80
+ "do_amp_to_db_mel": true,
81
+ "pitch_fmax": 640.0,
82
+ "pitch_fmin": 1.0,
83
+ "signal_norm": true,
84
+ "min_level_db": -100,
85
+ "symmetric_norm": true,
86
+ "max_norm": 4.0,
87
+ "clip_norm": true,
88
+ "stats_path": null
89
+ },
90
+ "model_args": {},
91
+ "_supports_cloning": false,
92
+ "languages": [
93
+ "en-us"
94
+ ],
95
+ "speakers": [],
96
+ "use_phonemes": true,
97
+ "phonemizer": "espeak",
98
+ "phoneme_language": "en-us",
99
+ "compute_input_seq_cache": false,
100
+ "text_cleaner": "phoneme_cleaners",
101
+ "enable_eos_bos_chars": false,
102
+ "test_sentences_file": "",
103
+ "phoneme_cache_path": "/home/ubuntu/phoneme_cache",
104
+ "characters": {
105
+ "characters_class": "TTS.tts.utils.text.characters.IPAPhonemes",
106
+ "vocab_dict": null,
107
+ "pad": "<PAD>",
108
+ "eos": "<EOS>",
109
+ "bos": "<BOS>",
110
+ "blank": "<BLNK>",
111
+ "characters": "iy\u0268\u0289\u026fu\u026a\u028f\u028ae\u00f8\u0258\u0259\u0275\u0264o\u025b\u0153\u025c\u025e\u028c\u0254\u00e6\u0250a\u0276\u0251\u0252\u1d7b\u0298\u0253\u01c0\u0257\u01c3\u0284\u01c2\u0260\u01c1\u029bpbtd\u0288\u0256c\u025fk\u0261q\u0262\u0294\u0274\u014b\u0272\u0273n\u0271m\u0299r\u0280\u2c71\u027e\u027d\u0278\u03b2fv\u03b8\u00f0sz\u0283\u0292\u0282\u0290\u00e7\u029dx\u0263\u03c7\u0281\u0127\u0295h\u0266\u026c\u026e\u028b\u0279\u027bj\u0270l\u026d\u028e\u029f\u02c8\u02cc\u02d0\u02d1\u028dw\u0265\u029c\u02a2\u02a1\u0255\u0291\u027a\u0267\u02b2\u0303\u025a\u02de\u026b",
112
+ "punctuations": "!'(),-.:;? ",
113
+ "phonemes": null,
114
+ "is_unique": false,
115
+ "is_sorted": true
116
+ },
117
+ "add_blank": false,
118
+ "batch_group_size": 0,
119
+ "loss_masking": null,
120
+ "min_audio_len": 22050,
121
+ "max_audio_len": 220500,
122
+ "min_text_len": 1,
123
+ "max_text_len": Infinity,
124
+ "compute_f0": false,
125
+ "compute_energy": false,
126
+ "compute_linear_spec": false,
127
+ "precompute_num_workers": 0,
128
+ "start_by_longest": false,
129
+ "shuffle": false,
130
+ "drop_last": false,
131
+ "datasets": [
132
+ {
133
+ "formatter": "ljspeech",
134
+ "dataset_name": "",
135
+ "path": "/home/ubuntu/LJSpeech-1.1/",
136
+ "meta_file_train": "metadata.csv",
137
+ "ignored_speakers": null,
138
+ "language": "",
139
+ "phonemizer": "",
140
+ "meta_file_val": "",
141
+ "meta_file_attn_mask": ""
142
+ }
143
+ ],
144
+ "test_sentences": [
145
+ "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",
146
+ "Be a voice, not an echo.",
147
+ "I'm sorry Dave. I'm afraid I can't do that.",
148
+ "This cake is great. It's so delicious and moist.",
149
+ "Prior to November 22, 1963."
150
+ ],
151
+ "eval_split_max_size": null,
152
+ "eval_split_size": 0.01,
153
+ "use_speaker_weighted_sampler": false,
154
+ "speaker_weighted_sampler_alpha": 1.0,
155
+ "use_language_weighted_sampler": false,
156
+ "language_weighted_sampler_alpha": 1.0,
157
+ "use_length_weighted_sampler": false,
158
+ "length_weighted_sampler_alpha": 1.0,
159
+ "num_chars": 132,
160
+ "use_encoder_prenet": true,
161
+ "hidden_channels_enc": 192,
162
+ "hidden_channels_dec": 192,
163
+ "hidden_channels_dp": 256,
164
+ "dropout_p_dp": 0.1,
165
+ "dropout_p_dec": 0.05,
166
+ "mean_only": true,
167
+ "out_channels": 80,
168
+ "num_flow_blocks_dec": 12,
169
+ "kernel_size_dec": 5,
170
+ "dilation_rate": 1,
171
+ "num_block_layers": 4,
172
+ "c_in_channels": 0,
173
+ "num_splits": 4,
174
+ "num_squeeze": 2,
175
+ "sigmoid_scale": false,
176
+ "encoder_type": "rel_pos_transformer",
177
+ "encoder_params": {
178
+ "kernel_size": 3,
179
+ "dropout_p": 0.1,
180
+ "num_layers": 6,
181
+ "num_heads": 2,
182
+ "hidden_channels_ffn": 768,
183
+ "input_length": null
184
+ },
185
+ "d_vector_dim": 0,
186
+ "data_dep_init_steps": 10,
187
+ "style_wav_for_test": null,
188
+ "inference_noise_scale": 0.0,
189
+ "length_scale": 1.0,
190
+ "use_speaker_embedding": false,
191
+ "speakers_file": null,
192
+ "use_d_vector_file": false,
193
+ "d_vector_file": null,
194
+ "min_seq_len": 3,
195
+ "max_seq_len": 500,
196
+ "r": 1,
197
+ "github_branch": "inside_docker"
198
+ }
model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52e49ef2cc522d6f5fad8b6bd6932cfaf8bc42ea22911d9d63ffbdda951a44f3
3
+ size 343850470
prepare.sh ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Installs
2
+ pip install git+https://github.com/idiap/coqui-tts.git
3
+ sudo apt update && sudo apt install espeak -y
4
+ sudo apt install ffmpeg libavcodec-dev libavformat-dev libavutil-dev -y
5
+ pip install "coqui-tts[codec]"
6
+ wget https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
7
+ tar -xjf LJSpeech-1.1.tar.bz2
8
+ wget https://huggingface.co/LH-Tech-AI/Flare-TTS-28M/resolve/main/model.pth
9
+ wget https://huggingface.co/LH-Tech-AI/Flare-TTS-28M/resolve/main/config.json
10
+
11
+ # Start training
12
+ screen -S vocoder
13
+ PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True python3 train_vocoder.py
train_vocoder.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # train_vocoder.py
2
+ import os
3
+ from trainer import Trainer, TrainerArgs
4
+ from TTS.utils.audio import AudioProcessor
5
+ from TTS.config.shared_configs import BaseAudioConfig
6
+ from TTS.vocoder.configs import HifiganConfig
7
+ from TTS.vocoder.datasets.preprocess import load_wav_data
8
+ from TTS.vocoder.models.gan import GAN
9
+
10
+
11
+ def main():
12
+ output_path = os.path.dirname(os.path.abspath(__file__))
13
+ data_path = os.path.join(output_path, "LJSpeech-1.1/wavs/")
14
+
15
+ audio_config = BaseAudioConfig(
16
+ sample_rate=22050,
17
+ resample=False,
18
+ do_trim_silence=True,
19
+ trim_db=45,
20
+
21
+ fft_size=1024,
22
+ win_length=1024,
23
+ hop_length=256,
24
+ frame_shift_ms=None,
25
+ frame_length_ms=None,
26
+
27
+ num_mels=80,
28
+ mel_fmin=0.0,
29
+ mel_fmax=None,
30
+
31
+ signal_norm=True,
32
+ symmetric_norm=True,
33
+ max_norm=4.0,
34
+ clip_norm=True,
35
+ ref_level_db=20,
36
+ min_level_db=-100,
37
+ spec_gain=20.0,
38
+ log_func="np.log10",
39
+ preemphasis=0.0,
40
+
41
+ stats_path=None,
42
+ )
43
+
44
+ config = HifiganConfig(
45
+ run_name="hifigan_ljspeech",
46
+ run_description="HiFi-GAN v1 from scratch, GlowTTS-compatible mels",
47
+
48
+ data_path=data_path,
49
+ output_path=output_path,
50
+ eval_split_size=10,
51
+
52
+ audio=audio_config,
53
+
54
+ epochs=2000,
55
+ batch_size=64,
56
+ eval_batch_size=16,
57
+ num_loader_workers=4,
58
+ num_eval_loader_workers=2,
59
+ run_eval=True,
60
+ test_delay_epochs=5,
61
+ mixed_precision=True,
62
+
63
+ seq_len=8192,
64
+ pad_short=2000,
65
+ use_noise_augment=True,
66
+
67
+ lr_gen=2e-4,
68
+ lr_disc=2e-4,
69
+
70
+ print_step=50,
71
+ print_eval=False,
72
+ save_step=5000,
73
+ save_n_checkpoints=5,
74
+ save_checkpoints=True,
75
+ log_model_step=10000,
76
+ plot_step=500,
77
+ )
78
+
79
+ ap = AudioProcessor(config=config.audio)
80
+
81
+ eval_samples, train_samples = load_wav_data(
82
+ config.data_path,
83
+ config.eval_split_size,
84
+ )
85
+
86
+ model = GAN(config)
87
+
88
+ trainer = Trainer(
89
+ TrainerArgs(),
90
+ config,
91
+ output_path,
92
+ model=model,
93
+ train_samples=train_samples,
94
+ eval_samples=eval_samples,
95
+ training_assets={"audio_processor": ap},
96
+ )
97
+
98
+ trainer.fit()
99
+
100
+
101
+ if __name__ == "__main__":
102
+ main()
vocoder_10000_checkpoint.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b64d44b94f6c71ad36825528405fb2bea374944cb78b22709260c4dfeddabbde
3
+ size 1016516407
vocoder_15000_checkpoint.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0a6ae4e9c311ea4aef2b44905f81e6b86a9094bd8f1fffb5276429dc75e2482
3
+ size 1016516407
vocoder_5000_checkpoint.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5ade14ee92c9a42f522b852e2a87c7e029722ade94040b85ab34e032e97882e
3
+ size 1016516407
vocoder_7995_checkpoint.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ba221e8f5b210572208c3c504e1d2b5f34ad88836370af51f7c8de5d27c3105
3
+ size 1016516471
vocoder_config.json ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "output_path": "/home/ubuntu",
3
+ "logger_uri": null,
4
+ "run_name": "hifigan_ljspeech",
5
+ "project_name": null,
6
+ "run_description": "HiFi-GAN v1 from scratch, GlowTTS-compatible mels",
7
+ "print_step": 50,
8
+ "plot_step": 500,
9
+ "model_param_stats": false,
10
+ "wandb_entity": null,
11
+ "dashboard_logger": "tensorboard",
12
+ "save_on_interrupt": true,
13
+ "log_model_step": 10000,
14
+ "save_step": 5000,
15
+ "save_n_checkpoints": 5,
16
+ "save_checkpoints": true,
17
+ "save_all_best": false,
18
+ "save_best_after": 0,
19
+ "target_loss": "loss_0",
20
+ "print_eval": false,
21
+ "test_delay_epochs": 5,
22
+ "run_eval": true,
23
+ "run_eval_steps": null,
24
+ "distributed_backend": "nccl",
25
+ "distributed_url": "tcp://localhost:54321",
26
+ "mixed_precision": true,
27
+ "precision": "fp16",
28
+ "epochs": 2000,
29
+ "batch_size": 64,
30
+ "eval_batch_size": 16,
31
+ "grad_clip": [
32
+ 5,
33
+ 5
34
+ ],
35
+ "scheduler_after_epoch": true,
36
+ "lr": 0.0001,
37
+ "optimizer": "AdamW",
38
+ "optimizer_params": {
39
+ "betas": [
40
+ 0.8,
41
+ 0.99
42
+ ],
43
+ "weight_decay": 0.0
44
+ },
45
+ "lr_scheduler": null,
46
+ "lr_scheduler_params": {},
47
+ "use_grad_scaler": false,
48
+ "allow_tf32": false,
49
+ "cudnn_enable": true,
50
+ "cudnn_deterministic": false,
51
+ "cudnn_benchmark": false,
52
+ "training_seed": 54321,
53
+ "model": "hifigan",
54
+ "num_loader_workers": 4,
55
+ "num_eval_loader_workers": 2,
56
+ "use_noise_augment": true,
57
+ "audio": {
58
+ "fft_size": 1024,
59
+ "win_length": 1024,
60
+ "hop_length": 256,
61
+ "frame_shift_ms": null,
62
+ "frame_length_ms": null,
63
+ "stft_pad_mode": "reflect",
64
+ "sample_rate": 22050,
65
+ "resample": false,
66
+ "preemphasis": 0.0,
67
+ "ref_level_db": 20,
68
+ "do_sound_norm": false,
69
+ "log_func": "np.log10",
70
+ "do_trim_silence": true,
71
+ "trim_db": 45,
72
+ "do_rms_norm": false,
73
+ "db_level": null,
74
+ "power": 1.5,
75
+ "griffin_lim_iters": 60,
76
+ "num_mels": 80,
77
+ "mel_fmin": 0.0,
78
+ "mel_fmax": null,
79
+ "spec_gain": 20.0,
80
+ "do_amp_to_db_linear": true,
81
+ "do_amp_to_db_mel": true,
82
+ "pitch_fmax": 640.0,
83
+ "pitch_fmin": 1.0,
84
+ "signal_norm": true,
85
+ "min_level_db": -100,
86
+ "symmetric_norm": true,
87
+ "max_norm": 4.0,
88
+ "clip_norm": true,
89
+ "stats_path": null
90
+ },
91
+ "eval_split_size": 10,
92
+ "data_path": "/home/ubuntu/LJSpeech-1.1/wavs/",
93
+ "feature_path": null,
94
+ "seq_len": 8192,
95
+ "pad_short": 2000,
96
+ "conv_pad": 0,
97
+ "use_cache": false,
98
+ "wd": 1e-06,
99
+ "use_stft_loss": false,
100
+ "use_subband_stft_loss": false,
101
+ "use_mse_gan_loss": true,
102
+ "use_hinge_gan_loss": false,
103
+ "use_feat_match_loss": true,
104
+ "use_l1_spec_loss": true,
105
+ "stft_loss_weight": 0,
106
+ "subband_stft_loss_weight": 0,
107
+ "mse_G_loss_weight": 1,
108
+ "hinge_G_loss_weight": 0,
109
+ "feat_match_loss_weight": 108,
110
+ "l1_spec_loss_weight": 45,
111
+ "stft_loss_params": {
112
+ "n_ffts": [
113
+ 1024,
114
+ 2048,
115
+ 512
116
+ ],
117
+ "hop_lengths": [
118
+ 120,
119
+ 240,
120
+ 50
121
+ ],
122
+ "win_lengths": [
123
+ 600,
124
+ 1200,
125
+ 240
126
+ ]
127
+ },
128
+ "l1_spec_loss_params": {
129
+ "use_mel": true,
130
+ "sample_rate": 22050,
131
+ "n_fft": 1024,
132
+ "hop_length": 256,
133
+ "win_length": 1024,
134
+ "n_mels": 80,
135
+ "mel_fmin": 0.0,
136
+ "mel_fmax": null
137
+ },
138
+ "lr_gen": 0.0002,
139
+ "lr_disc": 0.0002,
140
+ "lr_scheduler_gen": "ExponentialLR",
141
+ "lr_scheduler_gen_params": {
142
+ "gamma": 0.999,
143
+ "last_epoch": -1
144
+ },
145
+ "lr_scheduler_disc": "ExponentialLR",
146
+ "lr_scheduler_disc_params": {
147
+ "gamma": 0.999,
148
+ "last_epoch": -1
149
+ },
150
+ "use_pqmf": false,
151
+ "steps_to_start_discriminator": 0,
152
+ "diff_samples_for_G_and_D": false,
153
+ "discriminator_model": "hifigan_discriminator",
154
+ "generator_model": "hifigan_generator",
155
+ "generator_model_params": {
156
+ "upsample_factors": [
157
+ 8,
158
+ 8,
159
+ 2,
160
+ 2
161
+ ],
162
+ "upsample_kernel_sizes": [
163
+ 16,
164
+ 16,
165
+ 4,
166
+ 4
167
+ ],
168
+ "upsample_initial_channel": 512,
169
+ "resblock_kernel_sizes": [
170
+ 3,
171
+ 7,
172
+ 11
173
+ ],
174
+ "resblock_dilation_sizes": [
175
+ [
176
+ 1,
177
+ 3,
178
+ 5
179
+ ],
180
+ [
181
+ 1,
182
+ 3,
183
+ 5
184
+ ],
185
+ [
186
+ 1,
187
+ 3,
188
+ 5
189
+ ]
190
+ ],
191
+ "resblock_type": "1",
192
+ "in_channels": 80
193
+ },
194
+ "github_branch": "inside_docker"
195
+ }