dlxj commited on
Commit
d121da6
·
1 Parent(s): ab1cfde

生成 rnnt eou 训练数据集

Browse files
Files changed (2) hide show
  1. readme.txt +7 -0
  2. sou_eou_time.py +56 -0
readme.txt CHANGED
@@ -243,6 +243,13 @@ python examples/asr/asr_cache_aware_streaming/speech_to_text_cache_aware_streami
243
 
244
 
245
 
 
 
 
 
 
 
 
246
  它是不是真的流式输入,流式输出,为什么它是一下子打印全部结果,而不是边推理边输出部分结果 `examples\asr\asr_cache_aware_streaming\speech_to_text_cache_aware_streaming_infer.py`
247
 
248
  它**确实是真流式**的(前提是你用了支持 Cache-aware 的模型并搭配这个脚本)。
 
243
 
244
 
245
 
246
+ 生成 rnnt eou 数据集
247
+ python sou_eou_time.py
248
+ 复用前面 CTC 生成的数据,只是加字段
249
+
250
+
251
+
252
+
253
  它是不是真的流式输入,流式输出,为什么它是一下子打印全部结果,而不是边推理边输出部分结果 `examples\asr\asr_cache_aware_streaming\speech_to_text_cache_aware_streaming_infer.py`
254
 
255
  它**确实是真流式**的(前提是你用了支持 Cache-aware 的模型并搭配这个脚本)。
sou_eou_time.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+
4
def process_manifest(input_path):
    """Add RNNT SOU/EOU timing fields to a JSONL manifest, rewriting it in place.

    Each line of the manifest is expected to be one JSON object. For every
    record that carries a 'duration' field, the following keys are filled in
    only when absent (existing values are never overwritten):
      - offset:   0.0
      - sou_time: 0.0       (start-of-utterance time)
      - eou_time: duration  (end-of-utterance time)

    Records without 'duration' and lines that fail to parse as JSON are kept
    verbatim with a warning instead of aborting the whole rewrite; blank lines
    are dropped.

    Args:
        input_path: Path to the JSONL manifest file; rewritten in place.
    """
    if not os.path.exists(input_path):
        print(f"Warning: File not found {input_path}")
        return

    print(f"Processing {input_path} in-place")

    processed_count = 0
    updated_lines = []

    # 1. Read and transform every record first; the file is only rewritten
    #    after the whole pass succeeds, so a mid-read failure cannot leave a
    #    truncated manifest behind.
    with open(input_path, 'r', encoding='utf-8') as f_in:
        for line in f_in:
            line = line.strip()
            if not line:
                continue

            # Keep unparseable lines unchanged rather than crashing and
            # abandoning the in-place rewrite (consistent with the
            # missing-'duration' passthrough below).
            try:
                data = json.loads(line)
            except json.JSONDecodeError as err:
                print(f"Warning: invalid JSON line kept unchanged ({err}): {line}")
                updated_lines.append(line + '\n')
                continue

            if 'duration' not in data:
                print(f"Warning: 'duration' missing in a record, skipping: {data}")
                updated_lines.append(line + '\n')
                continue

            duration = float(data['duration'])

            # setdefault fills only the missing keys, so manifests that
            # already carry timing info are left untouched.
            data.setdefault('offset', 0.0)
            data.setdefault('sou_time', 0.0)
            data.setdefault('eou_time', duration)

            updated_lines.append(json.dumps(data, ensure_ascii=False) + '\n')
            processed_count += 1

    # 2. Write back to the same file.
    with open(input_path, 'w', encoding='utf-8') as f_out:
        f_out.writelines(updated_lines)

    print(f"Successfully processed {processed_count} records in {input_path}.")
48
if __name__ == '__main__':
    # Reuse the manifests produced by the earlier CTC pipeline; this script
    # only augments them with SOU/EOU timing fields.
    manifests = (
        r"data\common_voice_11_0\ja\train_tarred_1bk\tarred_audio_manifest.json",
        r"data\common_voice_11_0\ja\validation\validation_common_voice_11_0_manifest.json",
        r"data\common_voice_11_0\ja\test\test_common_voice_11_0_manifest.json",
    )
    for manifest_path in manifests:
        process_manifest(manifest_path)