omarsherif9 commited on
Commit
fe4f1c7
·
verified ·
1 Parent(s): 2034f6b

Upload livekit-turn-detector

Browse files
Files changed (31) hide show
  1. .gitattributes +2 -0
  2. livekit-turn-detector/blobs/03d79d95d1b482aa600d4af7649cb91eb4125eec +33 -0
  3. livekit-turn-detector/blobs/0ad5ecc2035b7031b88afb544ee95e2d49baa484 +0 -0
  4. livekit-turn-detector/blobs/179ad5365c3b58dca5228ed0eb85552e5238d979 +115 -0
  5. livekit-turn-detector/blobs/2f7b4c93c1cdb6d1e858b01f63e5f0f15bdb979dd0d177e8996a244c80e03925 +3 -0
  6. livekit-turn-detector/blobs/4e685767c3643b0363c9f826a98325683f29e9c7d550162c8e8740ba33aa31aa +3 -0
  7. livekit-turn-detector/blobs/69503b13f727ba3812b6803e97442a6de05ef5eb +0 -0
  8. livekit-turn-detector/blobs/a0d1aa8a0fe51e70b4eea91a7f2621cbf4c18347 +6 -0
  9. livekit-turn-detector/blobs/a1f02012c770e70739be68ede87513228a4cecbf +172 -0
  10. livekit-turn-detector/blobs/a6344aac8c09253b3b630fb776ae94478aa0275b +35 -0
  11. livekit-turn-detector/blobs/affea6f611e0ae3127c83032bc77f6a6f53f9f00 +0 -0
  12. livekit-turn-detector/blobs/c2a58348e2cb8cc98e72a3db29c347dd9d961e28 +32 -0
  13. livekit-turn-detector/blobs/db3ce48a6fcd0b741da0f6410866a77b1731d263 +185 -0
  14. livekit-turn-detector/blobs/ef52d45d1c6f53be3d8ee6091be8cdf6a702d996 +4 -0
  15. livekit-turn-detector/blobs/ff763bc19f24aaef5ff45e25dc6c21ce095c2d79 +36 -0
  16. livekit-turn-detector/refs/main +1 -0
  17. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/.gitattributes +35 -0
  18. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/LICENSE +115 -0
  19. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/README.md +185 -0
  20. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/added_tokens.json +4 -0
  21. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/config.json +32 -0
  22. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/generation_config.json +6 -0
  23. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/merges.txt +0 -0
  24. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/model.safetensors +3 -0
  25. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/model_q8.onnx +3 -0
  26. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/model_quantized.onnx +3 -0
  27. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/ort_config.json +33 -0
  28. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/special_tokens_map.json +36 -0
  29. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/tokenizer.json +0 -0
  30. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/tokenizer_config.json +172 -0
  31. livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/vocab.json +0 -0
.gitattributes CHANGED
@@ -44,3 +44,5 @@ embedding/blobs/f061cb7641880f52895cbacab7c4ab39b0844e2e6b73794f2798de460d9fa418
44
  embedding/blobs/f60256a833caee5c75a3903e589116752ee016ca7bc16f9b96e4db09984c5703 filter=lfs diff=lfs merge=lfs -text
45
  embedding/snapshots/835193815a3936a24a0ee7dc9e3d48c1fbb19c55/onnx/tokenizer.json filter=lfs diff=lfs merge=lfs -text
46
  embedding/snapshots/835193815a3936a24a0ee7dc9e3d48c1fbb19c55/tokenizer.json filter=lfs diff=lfs merge=lfs -text
 
 
 
44
  embedding/blobs/f60256a833caee5c75a3903e589116752ee016ca7bc16f9b96e4db09984c5703 filter=lfs diff=lfs merge=lfs -text
45
  embedding/snapshots/835193815a3936a24a0ee7dc9e3d48c1fbb19c55/onnx/tokenizer.json filter=lfs diff=lfs merge=lfs -text
46
  embedding/snapshots/835193815a3936a24a0ee7dc9e3d48c1fbb19c55/tokenizer.json filter=lfs diff=lfs merge=lfs -text
47
+ livekit-turn-detector/blobs/2f7b4c93c1cdb6d1e858b01f63e5f0f15bdb979dd0d177e8996a244c80e03925 filter=lfs diff=lfs merge=lfs -text
48
+ livekit-turn-detector/blobs/4e685767c3643b0363c9f826a98325683f29e9c7d550162c8e8740ba33aa31aa filter=lfs diff=lfs merge=lfs -text
livekit-turn-detector/blobs/03d79d95d1b482aa600d4af7649cb91eb4125eec ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "one_external_file": true,
3
+ "opset": null,
4
+ "optimization": {},
5
+ "quantization": {
6
+ "activations_dtype": "QUInt8",
7
+ "activations_symmetric": false,
8
+ "format": "QOperator",
9
+ "is_static": false,
10
+ "mode": "IntegerOps",
11
+ "nodes_to_exclude": [],
12
+ "nodes_to_quantize": [],
13
+ "operators_to_quantize": [
14
+ "Conv",
15
+ "MatMul",
16
+ "Attention",
17
+ "LSTM",
18
+ "Gather",
19
+ "Transpose",
20
+ "EmbedLayerNormalization"
21
+ ],
22
+ "per_channel": false,
23
+ "qdq_add_pair_to_weight": false,
24
+ "qdq_dedicated_pair": false,
25
+ "qdq_op_type_per_channel_support_to_axis": {
26
+ "MatMul": 1
27
+ },
28
+ "reduce_range": false,
29
+ "weights_dtype": "QInt8",
30
+ "weights_symmetric": true
31
+ },
32
+ "use_external_data_format": false
33
+ }
livekit-turn-detector/blobs/0ad5ecc2035b7031b88afb544ee95e2d49baa484 ADDED
The diff for this file is too large to render. See raw diff
 
livekit-turn-detector/blobs/179ad5365c3b58dca5228ed0eb85552e5238d979 ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ LIVEKIT MODEL LICENSE AGREEMENT
2
+
3
+ 1. Introduction
4
+
5
+ LiveKit Incorporated ("LiveKit") is making available its proprietary models for
6
+ use pursuant to the terms and conditions of this Agreement. As further
7
+ described below, you may use these LiveKit models freely but can only use them
8
+ together with the LiveKit Agents framework. You cannot use the LiveKit models
9
+ on a standalone basis or with any other frameworks.
10
+
11
+ BY CLICKING "I ACCEPT," OR BY DOWNLOADING, INSTALLING, OR OTHERWISE ACCESSING
12
+ OR USING THE LIVEKIT MATERIALS, YOU AGREE THAT YOU HAVE READ AND UNDERSTOOD,
13
+ AND, AS A CONDITION TO YOUR USE OF THE LIVEKIT MATERIALS, YOU AGREE TO BE
14
+ BOUND BY, THE FOLLOWING TERMS AND CONDITIONS.
15
+
16
+ 2. Definitions
17
+
18
+ "Agreement" means this LiveKit Model License Agreement.
19
+
20
+ "Documentation" means the specifications, manuals, and documentation
21
+ accompanying any LiveKit Model and distributed by LiveKit.
22
+
23
+ "Licensee" or "you" means the individual or entity agreeing to be bound by
24
+ this Agreement.
25
+
26
+ "LiveKit Agents" means the proprietary LiveKit software framework for building
27
+ real-time multimodal AI applications with programmable backend participants.
28
+
29
+ "LiveKit Materials" means, collectively, the LiveKit Models and Documentation.
30
+
31
+ "LiveKit Model" means any of LiveKit's proprietary software models or
32
+ algorithms, including machine-learning software code, model weights,
33
+ inference-enabling software code, training-enabling software code, and
34
+ fine-tuning enabling software code. Any derivative works of a LiveKit Model,
35
+ whether developed by LiveKit, you, or any third party, will be deemed the
36
+ "LiveKit Model" for the purposes of this Agreement.
37
+
38
+ 3. License Rights
39
+
40
+ Right to Use LiveKit Materials. Subject to the terms and conditions of this
41
+ Agreement, including the requirements of Section 3.b, LiveKit grants you a
42
+ nonexclusive, nontransferable, worldwide, royalty-free license under LiveKit's
43
+ intellectual property rights to use, reproduce, distribute, copy, and create
44
+ derivative works of the LiveKit Materials.
45
+
46
+ Limitation on Use. As a condition to your use of the LiveKit Materials, you
47
+ agree: (i) not to use any LiveKit Models on a standalone basis or with any
48
+ frameworks other than LiveKit Agents; (ii) not to use any LiveKit Materials or
49
+ any output from, or results of using, LiveKit Models (including any derivative
50
+ works thereof) to improve or otherwise develop any other models that are not
51
+ LiveKit Models; or (iii) distribute or otherwise make available the LiveKit
52
+ Materials (including any derivative works thereof) except (x) pursuant to the
53
+ terms of this Agreement, and (y) you reproduce the above copyright notice.
54
+
55
+ 4. Intellectual Property
56
+
57
+ The LiveKit Materials are owned by LiveKit and its licensors. Except for the
58
+ rights granted to you under this Agreement, all rights are reserved and no
59
+ other express or implied rights are granted.
60
+
61
+ You will own any derivative works that you created from the LiveKit Materials,
62
+ subject to the terms of this Agreement.
63
+
64
+ 5. Disclaimer
65
+
66
+ UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING, LIVEKIT PROVIDES
67
+ THE LIVEKIT MATERIALS, AND ANY OUTPUT OR RESULTS THEREFROM, ON AN "AS IS"
68
+ BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
69
+ INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE,
70
+ NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU
71
+ ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR
72
+ REDISTRIBUTING THE LIVEKIT MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR
73
+ USE OF THE LIVEKIT MATERIALS AND ANY OUTPUT AND RESULTS.
74
+
75
+ 6. Limitation of Liability
76
+
77
+ IN NO EVENT AND UNDER NO LEGAL THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE),
78
+ CONTRACT, OR OTHERWISE, UNLESS REQUIRED BY APPLICABLE LAW (SUCH AS DELIBERATE
79
+ AND GROSSLY NEGLIGENT ACTS) OR AGREED TO IN WRITING, WILL LIVEKIT BE LIABLE TO
80
+ YOU FOR INDIRECT DAMAGES, INCLUDING ANY SPECIAL, INCIDENTAL, OR CONSEQUENTIAL
81
+ DAMAGES OF ANY CHARACTER ARISING AS A RESULT OF THIS AGREEMENT OR OUT OF THE
82
+ USE OR INABILITY TO USE THE LIVEKIT MATERIALS OR ANY OUTPUT OR RESULTS
83
+ THEREFROM (INCLUDING BUT NOT LIMITED TO DAMAGES FOR LOSS OF GOODWILL, WORK
84
+ STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL
85
+ DAMAGES OR LOSSES), EVEN IF LIVEKIT HAS BEEN ADVISED OF THE POSSIBILITY OF
86
+ SUCH DAMAGES.
87
+
88
+ 7. Trademarks
89
+
90
+ This Agreement does not grant permission to use the trade names, trademarks,
91
+ service marks, or product names of LiveKit, except as required for reasonable
92
+ and customary use in describing the origin of the LiveKit Materials.
93
+
94
+ 8. Term and Termination
95
+
96
+ The term of this Agreement commences upon your acceptance of this Agreement
97
+ and continues in effect until you cease using the LiveKit Materials or it is
98
+ terminated by either party (on immediate written notice to the other party).
99
+ This Agreement will automatically terminate if you breach any of its terms.
100
+ Upon termination, you must immediately cease all use of the LiveKit Materials.
101
+ Sections 4, 5, 6, and 9 will survive termination.
102
+
103
+ 9. Governing Law and Venue
104
+
105
+ This Agreement is subject to the laws of the State of California, without
106
+ regard to its conflict of laws principles. The UN Convention on Contracts for
107
+ the International Sale of Goods does not apply to this Agreement. The courts
108
+ located in San Francisco, California, have exclusive jurisdiction for any
109
+ dispute arising out of this Agreement.
110
+
111
+ + + + +
112
+
113
+ Last Updated: November 25, 2024
114
+
115
+
livekit-turn-detector/blobs/2f7b4c93c1cdb6d1e858b01f63e5f0f15bdb979dd0d177e8996a244c80e03925 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f7b4c93c1cdb6d1e858b01f63e5f0f15bdb979dd0d177e8996a244c80e03925
3
+ size 538095016
livekit-turn-detector/blobs/4e685767c3643b0363c9f826a98325683f29e9c7d550162c8e8740ba33aa31aa ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e685767c3643b0363c9f826a98325683f29e9c7d550162c8e8740ba33aa31aa
3
+ size 165035487
livekit-turn-detector/blobs/69503b13f727ba3812b6803e97442a6de05ef5eb ADDED
The diff for this file is too large to render. See raw diff
 
livekit-turn-detector/blobs/a0d1aa8a0fe51e70b4eea91a7f2621cbf4c18347 ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 0,
4
+ "eos_token_id": 0,
5
+ "transformers_version": "4.46.3"
6
+ }
livekit-turn-detector/blobs/a1f02012c770e70739be68ede87513228a4cecbf ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "<repo_name>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "4": {
37
+ "content": "<reponame>",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ },
44
+ "5": {
45
+ "content": "<file_sep>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false,
50
+ "special": true
51
+ },
52
+ "6": {
53
+ "content": "<filename>",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false,
58
+ "special": true
59
+ },
60
+ "7": {
61
+ "content": "<gh_stars>",
62
+ "lstrip": false,
63
+ "normalized": false,
64
+ "rstrip": false,
65
+ "single_word": false,
66
+ "special": true
67
+ },
68
+ "8": {
69
+ "content": "<issue_start>",
70
+ "lstrip": false,
71
+ "normalized": false,
72
+ "rstrip": false,
73
+ "single_word": false,
74
+ "special": true
75
+ },
76
+ "9": {
77
+ "content": "<issue_comment>",
78
+ "lstrip": false,
79
+ "normalized": false,
80
+ "rstrip": false,
81
+ "single_word": false,
82
+ "special": true
83
+ },
84
+ "10": {
85
+ "content": "<issue_closed>",
86
+ "lstrip": false,
87
+ "normalized": false,
88
+ "rstrip": false,
89
+ "single_word": false,
90
+ "special": true
91
+ },
92
+ "11": {
93
+ "content": "<jupyter_start>",
94
+ "lstrip": false,
95
+ "normalized": false,
96
+ "rstrip": false,
97
+ "single_word": false,
98
+ "special": true
99
+ },
100
+ "12": {
101
+ "content": "<jupyter_text>",
102
+ "lstrip": false,
103
+ "normalized": false,
104
+ "rstrip": false,
105
+ "single_word": false,
106
+ "special": true
107
+ },
108
+ "13": {
109
+ "content": "<jupyter_code>",
110
+ "lstrip": false,
111
+ "normalized": false,
112
+ "rstrip": false,
113
+ "single_word": false,
114
+ "special": true
115
+ },
116
+ "14": {
117
+ "content": "<jupyter_output>",
118
+ "lstrip": false,
119
+ "normalized": false,
120
+ "rstrip": false,
121
+ "single_word": false,
122
+ "special": true
123
+ },
124
+ "15": {
125
+ "content": "<jupyter_script>",
126
+ "lstrip": false,
127
+ "normalized": false,
128
+ "rstrip": false,
129
+ "single_word": false,
130
+ "special": true
131
+ },
132
+ "16": {
133
+ "content": "<empty_output>",
134
+ "lstrip": false,
135
+ "normalized": false,
136
+ "rstrip": false,
137
+ "single_word": false,
138
+ "special": true
139
+ },
140
+ "49152": {
141
+ "content": "<|user|>",
142
+ "lstrip": false,
143
+ "normalized": false,
144
+ "rstrip": false,
145
+ "single_word": false,
146
+ "special": true
147
+ },
148
+ "49153": {
149
+ "content": "<|assistant|>",
150
+ "lstrip": false,
151
+ "normalized": false,
152
+ "rstrip": false,
153
+ "single_word": false,
154
+ "special": true
155
+ }
156
+ },
157
+ "additional_special_tokens": [
158
+ "<|im_start|>",
159
+ "<|im_end|>",
160
+ "<|user|>",
161
+ "<|assistant|>"
162
+ ],
163
+ "bos_token": "<|endoftext|>",
164
+ "chat_template": "{% for message in messages %}{{'<|im_start|>' + '<|' + message['role'] + '|>' + message['content'] + '<|im_end|>'}}{% endfor %}",
165
+ "clean_up_tokenization_spaces": false,
166
+ "eos_token": "<|endoftext|>",
167
+ "model_max_length": 8192,
168
+ "pad_token": "<|endoftext|>",
169
+ "tokenizer_class": "GPT2Tokenizer",
170
+ "unk_token": "<|endoftext|>",
171
+ "vocab_size": 49152
172
+ }
livekit-turn-detector/blobs/a6344aac8c09253b3b630fb776ae94478aa0275b ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
livekit-turn-detector/blobs/affea6f611e0ae3127c83032bc77f6a6f53f9f00 ADDED
The diff for this file is too large to render. See raw diff
 
livekit-turn-detector/blobs/c2a58348e2cb8cc98e72a3db29c347dd9d961e28 ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_attn_implementation_autoset": true,
3
+ "_name_or_path": "/tmp/tmpdhw4_gdh",
4
+ "architectures": [
5
+ "LlamaForCausalLM"
6
+ ],
7
+ "attention_bias": false,
8
+ "attention_dropout": 0.0,
9
+ "bos_token_id": 0,
10
+ "eos_token_id": 0,
11
+ "head_dim": 64,
12
+ "hidden_act": "silu",
13
+ "hidden_size": 576,
14
+ "initializer_range": 0.041666666666666664,
15
+ "intermediate_size": 1536,
16
+ "is_llama_config": true,
17
+ "max_position_embeddings": 8192,
18
+ "mlp_bias": false,
19
+ "model_type": "llama",
20
+ "num_attention_heads": 9,
21
+ "num_hidden_layers": 30,
22
+ "num_key_value_heads": 3,
23
+ "pretraining_tp": 1,
24
+ "rms_norm_eps": 1e-05,
25
+ "rope_interleaved": false,
26
+ "rope_scaling": null,
27
+ "rope_theta": 100000,
28
+ "tie_word_embeddings": true,
29
+ "transformers_version": "4.46.3",
30
+ "use_cache": true,
31
+ "vocab_size": 49154
32
+ }
livekit-turn-detector/blobs/db3ce48a6fcd0b741da0f6410866a77b1731d263 ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ - es
5
+ - fr
6
+ - de
7
+ - it
8
+ - pt
9
+ - nl
10
+ - zh
11
+ - ja
12
+ - ko
13
+ - id
14
+ - tr
15
+ - ru
16
+ - hi
17
+ license: other
18
+ license_name: livekit-model-license
19
+ license_link: LICENSE
20
+ library_name: transformers
21
+ pipeline_tag: text-classification
22
+ base_model: Qwen/Qwen2.5-0.5B-Instruct
23
+ tags:
24
+ - voice-ai
25
+ - turn-detection
26
+ - end-of-utterance
27
+ - end-of-turn
28
+ - conversational-ai
29
+ - livekit
30
+ - onnx
31
+ - quantized
32
+ - knowledge-distillation
33
+ ---
34
+
35
+ # LiveKit Turn Detector
36
+
37
+ An open-weights language model for contextually-aware end-of-utterance (EOU) detection in voice AI applications. The model predicts whether a user has finished speaking based on the semantic content of their transcribed speech, providing a critical complement to voice activity detection (VAD) systems.
38
+
39
+ > **📖 For installation, usage examples, and integration guides, see the [LiveKit documentation](https://docs.livekit.io/agents/logic/turns/turn-detector/).**
40
+
41
+ ## Table of Contents
42
+
43
+ - [Overview](#overview)
44
+ - [Model Variants](#model-variants)
45
+ - [How It Works](#how-it-works)
46
+ - [Architecture and Training](#architecture-and-training)
47
+ - [Supported Languages](#supported-languages)
48
+ - [Benchmarks](#benchmarks)
49
+ - [Usage](#usage)
50
+ - [Deployment Requirements](#deployment-requirements)
51
+ - [Limitations](#limitations)
52
+ - [License](LICENSE)
53
+ - [Resources](#resources)
54
+
55
+ ## Overview
56
+
57
+ Traditional voice agents rely on voice activity detection (VAD) to determine when a user has finished speaking. VAD works by detecting the presence or absence of speech in an audio signal and applying a silence timer. While effective for detecting pauses, VAD lacks language understanding and frequently causes false positives. For example, a user who says *"I need to think about that for a moment..."* and then pauses will be interrupted by a VAD-only system, even though they clearly intend to continue.
58
+
59
+ This model adds semantic understanding to the turn detection process. It analyzes the transcribed text of a conversation in real time and predicts the probability that the user has completed their turn. When integrated into a voice pipeline alongside VAD, it substantially reduces unwanted interruptions while maintaining responsiveness.
60
+
61
+ The model is particularly effective in scenarios involving structured data input — such as dictating addresses, phone numbers, email addresses, and credit card numbers — where natural pauses between segments do not indicate completion.
62
+
63
+ ## Model Variants
64
+
65
+ **Multilingual** (recommended) and **English-only** (deprecated) are distributed as INT8 quantized ONNX models (`model_q8.onnx`) optimized for CPU inference.
66
+
67
+ > **⚠️ The English-only model (`EnglishModel`) is deprecated.** Use the **multilingual model (`MultilingualModel`)** for all new projects, including English-only applications. The multilingual model provides better accuracy across all languages — including English — thanks to knowledge distillation from a larger teacher model and an expanded training dataset. The English-only variant will not receive further updates.
68
+
69
+ ## How It Works
70
+
71
+ The model operates on transcribed text from a speech-to-text (STT) system, not raw audio.
72
+
73
+ 1. **Input**: The recent conversation history (up to **6 turns**, truncated to **128 tokens**) is formatted using the [Qwen chat template](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct) with `<|im_start|>` / `<|im_end|>` delimiters. The final user message is left *without* the closing `<|im_end|>` token.
74
+
75
+ 2. **Prediction**: The model predicts the probability of the `<|im_end|>` token appearing next. A **high probability** indicates the user has likely finished their utterance. A **low probability** indicates they are likely to continue.
76
+
77
+ 3. **Thresholding**: Per-language thresholds (stored in `languages.json`) convert the raw probability into a binary decision. These thresholds are tuned to balance responsiveness and accuracy for each supported language.
78
+
79
+ 4. **Integration with VAD**: In the LiveKit Agents framework, the model works alongside the [Silero VAD](https://docs.livekit.io/agents/logic/turns/vad/) plugin. VAD handles speech presence detection and interruption triggering, while this model provides the semantic signal for when to commit a turn.
80
+
81
+ ### Text Preprocessing
82
+
83
+ The **multilingual** variant applies the following normalization before inference:
84
+
85
+ - NFKC unicode normalization
86
+ - Lowercasing
87
+ - Punctuation removal (preserving apostrophes and hyphens)
88
+ - Whitespace collapsing
89
+
90
+ The **English-only** variant passes raw transcribed text without normalization.
91
+
92
+ ## Architecture and Training
93
+
94
+ ### Base Model
95
+
96
+ Both variants are fine-tuned from [Qwen/Qwen2.5-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct), selected for its strong performance on this task while enabling low-latency CPU inference.
97
+
98
+ ### Knowledge Distillation
99
+
100
+ A **Qwen2.5-7B-Instruct** model was first fine-tuned as a teacher on end-of-turn prediction. Its knowledge was then distilled into the 0.5B student model. The distilled model approaches teacher-level accuracy while maintaining the efficiency of the smaller architecture, converging after approximately 1,500 training steps.
101
+
102
+ ### Training Data
103
+
104
+ The training dataset is a mix of:
105
+
106
+ - **Real call center transcripts** covering diverse conversational patterns
107
+ - **Synthetic dialogues** emphasizing structured data input — addresses, email addresses, phone numbers, and credit card numbers
108
+ - **Multi-format STT outputs** to handle provider variation (e.g., "forty two" vs. "42"), ensuring consistent predictions across different STT engines without runtime overhead
109
+
110
+ Although structured data enhancements were added only to the English training set, performance improvements generalized across languages due to the multilingual knowledge encoded in the Qwen2.5 base model.
111
+
112
+ ### Quantization
113
+
114
+ The trained model is exported to ONNX format and quantized to INT8 (`model_q8.onnx`), enabling efficient CPU-only inference with ONNX Runtime.
115
+
116
+ ## Supported Languages
117
+
118
+ The multilingual model supports 14 languages. The model relies on the STT provider to report the detected language, which is then used to select the appropriate per-language threshold.
119
+
120
+ English, Spanish, French, German, Italian, Portuguese, Dutch, Chinese, Japanese, Korean, Indonesian, Turkish, Russian, Hindi
121
+
122
+ ## Benchmarks
123
+
124
+ ### Detection Accuracy (Multilingual Variant)
125
+
126
+ - **True positive** — the model correctly identifies the user has finished speaking.
127
+ - **True negative** — the model correctly identifies the user will continue speaking.
128
+
129
+ | Language | True Positive Rate | True Negative Rate |
130
+ |---|---|---|
131
+ | Hindi | 99.4% | 96.3% |
132
+ | Korean | 99.3% | 94.5% |
133
+ | French | 99.3% | 88.9% |
134
+ | Indonesian | 99.3% | 89.4% |
135
+ | Japanese | 99.3% | 88.8% |
136
+ | Dutch | 99.3% | 88.1% |
137
+ | Russian | 99.3% | 88.0% |
138
+ | German | 99.3% | 87.8% |
139
+ | Portuguese | 99.4% | 87.4% |
140
+ | Turkish | 99.3% | 87.3% |
141
+ | English | 99.3% | 87.0% |
142
+ | Chinese | 99.3% | 86.6% |
143
+ | Spanish | 99.3% | 86.0% |
144
+ | Italian | 99.3% | 85.1% |
145
+
146
+ ### Improvement Over Prior Version
147
+
148
+ The multilingual v0.4.1 release achieved a **39.23% relative improvement** in handling structured inputs (emails, addresses, phone numbers, credit card numbers) compared to the prior version, reducing premature interruptions during data collection scenarios.
149
+
150
+ ## Usage
151
+
152
+ The model is designed for use as a turn detection plugin within the [LiveKit Agents](https://github.com/livekit/agents) framework.
153
+
154
+ For complete installation instructions, code examples (Python and Node.js), and configuration options, see the **[LiveKit turn detector plugin documentation](https://docs.livekit.io/agents/logic/turns/turn-detector/)**.
155
+
156
+ For broader context on how turn detection fits into the voice pipeline — including VAD configuration, interruption handling, and manual turn control — see the **[Turns overview](https://docs.livekit.io/agents/logic/turns/)**.
157
+
158
+ ## Deployment Requirements
159
+
160
+ - **Runtime**: CPU-only (no GPU required). Uses [ONNX Runtime](https://onnxruntime.ai/) with the `CPUExecutionProvider`.
161
+ - **RAM**: <500 MB for the multilingual model.
162
+ - **Instance type**: Use compute-optimized instances (e.g., AWS c6i, c7i). Avoid burstable instances (e.g., AWS t3, t4g) to prevent inference timeouts from CPU credit exhaustion.
163
+ - **LiveKit Cloud**: The model is deployed globally on LiveKit Cloud. Agents running there automatically use the optimized remote inference service with no local resource requirements.
164
+
165
+ ## Limitations
166
+
167
+ - **Text-only input**: The model operates on STT-transcribed text and cannot incorporate prosodic cues such as pauses, intonation, or emphasis. Future versions may integrate multimodal audio features.
168
+ - **STT dependency**: Prediction quality depends on the accuracy and output format of the upstream STT provider. Mismatches between training and deployment STT formats may degrade performance.
169
+ - **Context window**: Limited to 128 tokens across a maximum of 6 conversation turns.
170
+ - **Language coverage**: Currently supports 14 languages. Performance on unsupported languages is undefined.
171
+ - **Realtime model compatibility**: Cannot be used with audio-native realtime models (e.g., OpenAI Realtime API) without adding a separate STT service, which incurs additional cost and latency.
172
+
173
+ ## License
174
+
175
+ This model is released under the [LiveKit Model License](./LICENSE).
176
+
177
+ ## Resources
178
+
179
+ - **[Documentation](https://docs.livekit.io/agents/logic/turns/turn-detector/)**: Full plugin documentation, installation, and integration guide.
180
+ - **[Turns Overview](https://docs.livekit.io/agents/logic/turns/)**: How turn detection fits into the LiveKit Agents voice pipeline.
181
+ - **[Blog: Improved End-of-Turn Model](https://blog.livekit.io/improved-end-of-turn-model-cuts-voice-ai-interruptions-39/)**: Technical deep dive on the multilingual distillation approach and benchmarks.
182
+ - **[Blog: Using a Transformer for Turn Detection](https://blog.livekit.io/using-a-transformer-to-improve-end-of-turn-detection/)**: Original blog post introducing the concept and architecture.
183
+ - **[Video: LiveKit Turn Detector](https://youtu.be/OZG0oZKctgw)**: Overview video demonstrating the plugin.
184
+ - **[GitHub: Plugin Source](https://github.com/livekit/agents/tree/main/livekit-plugins/livekit-plugins-turn-detector)**: Source code for the `livekit-plugins-turn-detector` package.
185
+ - **[PyPI](https://pypi.org/project/livekit-plugins-turn-detector/)** | **[npm](https://www.npmjs.com/package/@livekit/agents-plugin-livekit)**: Package registries.
livekit-turn-detector/blobs/ef52d45d1c6f53be3d8ee6091be8cdf6a702d996 ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "<|assistant|>": 49153,
3
+ "<|user|>": 49152
4
+ }
livekit-turn-detector/blobs/ff763bc19f24aaef5ff45e25dc6c21ce095c2d79 ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|user|>",
6
+ "<|assistant|>"
7
+ ],
8
+ "bos_token": {
9
+ "content": "<|endoftext|>",
10
+ "lstrip": false,
11
+ "normalized": false,
12
+ "rstrip": false,
13
+ "single_word": false
14
+ },
15
+ "eos_token": {
16
+ "content": "<|endoftext|>",
17
+ "lstrip": false,
18
+ "normalized": false,
19
+ "rstrip": false,
20
+ "single_word": false
21
+ },
22
+ "pad_token": {
23
+ "content": "<|endoftext|>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": false,
27
+ "single_word": false
28
+ },
29
+ "unk_token": {
30
+ "content": "<|endoftext|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false
35
+ }
36
+ }
livekit-turn-detector/refs/main ADDED
@@ -0,0 +1 @@
 
 
1
+ fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/LICENSE ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ LIVEKIT MODEL LICENSE AGREEMENT
2
+
3
+ 1. Introduction
4
+
5
+ LiveKit Incorporated ("LiveKit") is making available its proprietary models for
6
+ use pursuant to the terms and conditions of this Agreement. As further
7
+ described below, you may use these LiveKit models freely but can only use them
8
+ together with the LiveKit Agents framework. You cannot use the LiveKit models
9
+ on a standalone basis or with any other frameworks.
10
+
11
+ BY CLICKING "I ACCEPT," OR BY DOWNLOADING, INSTALLING, OR OTHERWISE ACCESSING
12
+ OR USING THE LIVEKIT MATERIALS, YOU AGREE THAT YOU HAVE READ AND UNDERSTOOD,
13
+ AND, AS A CONDITION TO YOUR USE OF THE LIVEKIT MATERIALS, YOU AGREE TO BE
14
+ BOUND BY, THE FOLLOWING TERMS AND CONDITIONS.
15
+
16
+ 2. Definitions
17
+
18
+ "Agreement" means this LiveKit Model License Agreement.
19
+
20
+ "Documentation" means the specifications, manuals, and documentation
21
+ accompanying any LiveKit Model and distributed by LiveKit.
22
+
23
+ "Licensee" or "you" means the individual or entity agreeing to be bound by
24
+ this Agreement.
25
+
26
+ "LiveKit Agents" means the proprietary LiveKit software framework for building
27
+ real-time multimodal AI applications with programmable backend participants.
28
+
29
+ "LiveKit Materials" means, collectively, the LiveKit Models and Documentation.
30
+
31
+ "LiveKit Model" means any of LiveKit's proprietary software models or
32
+ algorithms, including machine-learning software code, model weights,
33
+ inference-enabling software code, training-enabling software code, and
34
+ fine-tuning enabling software code. Any derivative works of a LiveKit Model,
35
+ whether developed by LiveKit, you, or any third party, will be deemed the
36
+ "LiveKit Model" for the purposes of this Agreement.
37
+
38
+ 3. License Rights
39
+
40
+ Right to Use LiveKit Materials. Subject to the terms and conditions of this
41
+ Agreement, including the requirements of Section 3.b, LiveKit grants you a
42
+ nonexclusive, nontransferable, worldwide, royalty-free license under LiveKit's
43
+ intellectual property rights to use, reproduce, distribute, copy, and create
44
+ derivative works of the LiveKit Materials.
45
+
46
+ Limitation on Use. As a condition to your use of the LiveKit Materials, you
47
+ agree: (i) not to use any LiveKit Models on a standalone basis or with any
48
+ frameworks other than LiveKit Agents; (ii) not to use any LiveKit Materials or
49
+ any output from, or results of using, LiveKit Models (including any derivative
50
+ works thereof) to improve or otherwise develop any other models that are not
51
+ LiveKit Models; or (iii) distribute or otherwise make available the LiveKit
52
+ Materials (including any derivative works thereof) except (x) pursuant to the
53
+ terms of this Agreement, and (y) you reproduce the above copyright notice.
54
+
55
+ 4. Intellectual Property
56
+
57
+ The LiveKit Materials are owned by LiveKit and its licensors. Except for the
58
+ rights granted to you under this Agreement, all rights are reserved and no
59
+ other express or implied rights are granted.
60
+
61
+ You will own any derivative works that you created from the LiveKit Materials,
62
+ subject to the terms of this Agreement.
63
+
64
+ 5. Disclaimer
65
+
66
+ UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING, LIVEKIT PROVIDES
67
+ THE LIVEKIT MATERIALS, AND ANY OUTPUT OR RESULTS THEREFROM, ON AN "AS IS"
68
+ BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
69
+ INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE,
70
+ NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU
71
+ ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR
72
+ REDISTRIBUTING THE LIVEKIT MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR
73
+ USE OF THE LIVEKIT MATERIALS AND ANY OUTPUT AND RESULTS.
74
+
75
+ 6. Limitation of Liability
76
+
77
+ IN NO EVENT AND UNDER NO LEGAL THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE),
78
+ CONTRACT, OR OTHERWISE, UNLESS REQUIRED BY APPLICABLE LAW (SUCH AS DELIBERATE
79
+ AND GROSSLY NEGLIGENT ACTS) OR AGREED TO IN WRITING, WILL LIVEKIT BE LIABLE TO
80
+ YOU FOR INDIRECT DAMAGES, INCLUDING ANY SPECIAL, INCIDENTAL, OR CONSEQUENTIAL
81
+ DAMAGES OF ANY CHARACTER ARISING AS A RESULT OF THIS AGREEMENT OR OUT OF THE
82
+ USE OR INABILITY TO USE THE LIVEKIT MATERIALS OR ANY OUTPUT OR RESULTS
83
+ THEREFROM (INCLUDING BUT NOT LIMITED TO DAMAGES FOR LOSS OF GOODWILL, WORK
84
+ STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL
85
+ DAMAGES OR LOSSES), EVEN IF LIVEKIT HAS BEEN ADVISED OF THE POSSIBILITY OF
86
+ SUCH DAMAGES.
87
+
88
+ 7. Trademarks
89
+
90
+ This Agreement does not grant permission to use the trade names, trademarks,
91
+ service marks, or product names of LiveKit, except as required for reasonable
92
+ and customary use in describing the origin of the LiveKit Materials.
93
+
94
+ 8. Term and Termination
95
+
96
+ The term of this Agreement commences upon your acceptance of this Agreement
97
+ and continues in effect until you cease using the LiveKit Materials or it is
98
+ terminated by either party (on immediate written notice to the other party).
99
+ This Agreement will automatically terminate if you breach any of its terms.
100
+ Upon termination, you must immediately cease all use of the LiveKit Materials.
101
+ Sections 4, 5, 6, and 9 will survive termination.
102
+
103
+ 9. Governing Law and Venue
104
+
105
+ This Agreement is subject to the laws of the State of California, without
106
+ regard to its conflict of laws principles. The UN Convention on Contracts for
107
+ the International Sale of Goods does not apply to this Agreement. The courts
108
+ located in San Francisco, California, have exclusive jurisdiction for any
109
+ dispute arising out of this Agreement.
110
+
111
+ + + + +
112
+
113
+ Last Updated: November 25, 2024
114
+
115
+
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/README.md ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ - es
5
+ - fr
6
+ - de
7
+ - it
8
+ - pt
9
+ - nl
10
+ - zh
11
+ - ja
12
+ - ko
13
+ - id
14
+ - tr
15
+ - ru
16
+ - hi
17
+ license: other
18
+ license_name: livekit-model-license
19
+ license_link: LICENSE
20
+ library_name: transformers
21
+ pipeline_tag: text-classification
22
+ base_model: Qwen/Qwen2.5-0.5B-Instruct
23
+ tags:
24
+ - voice-ai
25
+ - turn-detection
26
+ - end-of-utterance
27
+ - end-of-turn
28
+ - conversational-ai
29
+ - livekit
30
+ - onnx
31
+ - quantized
32
+ - knowledge-distillation
33
+ ---
34
+
35
+ # LiveKit Turn Detector
36
+
37
+ An open-weights language model for contextually-aware end-of-utterance (EOU) detection in voice AI applications. The model predicts whether a user has finished speaking based on the semantic content of their transcribed speech, providing a critical complement to voice activity detection (VAD) systems.
38
+
39
+ > **📖 For installation, usage examples, and integration guides, see the [LiveKit documentation](https://docs.livekit.io/agents/logic/turns/turn-detector/).**
40
+
41
+ ## Table of Contents
42
+
43
+ - [Overview](#overview)
44
+ - [Model Variants](#model-variants)
45
+ - [How It Works](#how-it-works)
46
+ - [Architecture and Training](#architecture-and-training)
47
+ - [Supported Languages](#supported-languages)
48
+ - [Benchmarks](#benchmarks)
49
+ - [Usage](#usage)
50
+ - [Deployment Requirements](#deployment-requirements)
51
+ - [Limitations](#limitations)
52
+ - [License](#license)
53
+ - [Resources](#resources)
54
+
55
+ ## Overview
56
+
57
+ Traditional voice agents rely on voice activity detection (VAD) to determine when a user has finished speaking. VAD works by detecting the presence or absence of speech in an audio signal and applying a silence timer. While effective for detecting pauses, VAD lacks language understanding and frequently causes false positives. For example, a user who says *"I need to think about that for a moment..."* and then pauses will be interrupted by a VAD-only system, even though they clearly intend to continue.
58
+
59
+ This model adds semantic understanding to the turn detection process. It analyzes the transcribed text of a conversation in real time and predicts the probability that the user has completed their turn. When integrated into a voice pipeline alongside VAD, it substantially reduces unwanted interruptions while maintaining responsiveness.
60
+
61
+ The model is particularly effective in scenarios involving structured data input — such as dictating addresses, phone numbers, email addresses, and credit card numbers — where natural pauses between segments do not indicate completion.
62
+
63
+ ## Model Variants
64
+
65
+ **Multilingual** (recommended) and **English-only** (deprecated) are distributed as INT8 quantized ONNX models (`model_q8.onnx`) optimized for CPU inference.
66
+
67
+ > **⚠️ The English-only model (`EnglishModel`) is deprecated.** Use the **multilingual model (`MultilingualModel`)** for all new projects, including English-only applications. The multilingual model provides better accuracy across all languages — including English — thanks to knowledge distillation from a larger teacher model and an expanded training dataset. The English-only variant will not receive further updates.
68
+
69
+ ## How It Works
70
+
71
+ The model operates on transcribed text from a speech-to-text (STT) system, not raw audio.
72
+
73
+ 1. **Input**: The recent conversation history (up to **6 turns**, truncated to **128 tokens**) is formatted using the [Qwen chat template](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct) with `<|im_start|>` / `<|im_end|>` delimiters. The final user message is left *without* the closing `<|im_end|>` token.
74
+
75
+ 2. **Prediction**: The model predicts the probability of the `<|im_end|>` token appearing next. A **high probability** indicates the user has likely finished their utterance. A **low probability** indicates they are likely to continue.
76
+
77
+ 3. **Thresholding**: Per-language thresholds (stored in `languages.json`) convert the raw probability into a binary decision. These thresholds are tuned to balance responsiveness and accuracy for each supported language.
78
+
79
+ 4. **Integration with VAD**: In the LiveKit Agents framework, the model works alongside the [Silero VAD](https://docs.livekit.io/agents/logic/turns/vad/) plugin. VAD handles speech presence detection and interruption triggering, while this model provides the semantic signal for when to commit a turn.
80
+
81
+ ### Text Preprocessing
82
+
83
+ The **multilingual** variant applies the following normalization before inference:
84
+
85
+ - NFKC unicode normalization
86
+ - Lowercasing
87
+ - Punctuation removal (preserving apostrophes and hyphens)
88
+ - Whitespace collapsing
89
+
90
+ The **English-only** variant passes raw transcribed text without normalization.
91
+
92
+ ## Architecture and Training
93
+
94
+ ### Base Model
95
+
96
+ Both variants are fine-tuned from [Qwen/Qwen2.5-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct), selected for its strong performance on this task while enabling low-latency CPU inference.
97
+
98
+ ### Knowledge Distillation
99
+
100
+ A **Qwen2.5-7B-Instruct** model was first fine-tuned as a teacher on end-of-turn prediction. Its knowledge was then distilled into the 0.5B student model. The distilled model approaches teacher-level accuracy while maintaining the efficiency of the smaller architecture, converging after approximately 1,500 training steps.
101
+
102
+ ### Training Data
103
+
104
+ The training dataset is a mix of:
105
+
106
+ - **Real call center transcripts** covering diverse conversational patterns
107
+ - **Synthetic dialogues** emphasizing structured data input — addresses, email addresses, phone numbers, and credit card numbers
108
+ - **Multi-format STT outputs** to handle provider variation (e.g., "forty two" vs. "42"), ensuring consistent predictions across different STT engines without runtime overhead
109
+
110
+ Although structured data enhancements were added only to the English training set, performance improvements generalized across languages due to the multilingual knowledge encoded in the Qwen2.5 base model.
111
+
112
+ ### Quantization
113
+
114
+ The trained model is exported to ONNX format and quantized to INT8 (`model_q8.onnx`), enabling efficient CPU-only inference with ONNX Runtime.
115
+
116
+ ## Supported Languages
117
+
118
+ The multilingual model supports 14 languages. The model relies on the STT provider to report the detected language, which is then used to select the appropriate per-language threshold.
119
+
120
+ English, Spanish, French, German, Italian, Portuguese, Dutch, Chinese, Japanese, Korean, Indonesian, Turkish, Russian, Hindi
121
+
122
+ ## Benchmarks
123
+
124
+ ### Detection Accuracy (Multilingual Variant)
125
+
126
+ - **True positive** — the model correctly identifies the user has finished speaking.
127
+ - **True negative** — the model correctly identifies the user will continue speaking.
128
+
129
+ | Language | True Positive Rate | True Negative Rate |
130
+ |---|---|---|
131
+ | Hindi | 99.4% | 96.3% |
132
+ | Korean | 99.3% | 94.5% |
133
+ | French | 99.3% | 88.9% |
134
+ | Indonesian | 99.3% | 89.4% |
135
+ | Japanese | 99.3% | 88.8% |
136
+ | Dutch | 99.3% | 88.1% |
137
+ | Russian | 99.3% | 88.0% |
138
+ | German | 99.3% | 87.8% |
139
+ | Portuguese | 99.4% | 87.4% |
140
+ | Turkish | 99.3% | 87.3% |
141
+ | English | 99.3% | 87.0% |
142
+ | Chinese | 99.3% | 86.6% |
143
+ | Spanish | 99.3% | 86.0% |
144
+ | Italian | 99.3% | 85.1% |
145
+
146
+ ### Improvement Over Prior Version
147
+
148
+ The multilingual v0.4.1 release achieved a **39.23% relative improvement** in handling structured inputs (emails, addresses, phone numbers, credit card numbers) compared to the prior version, reducing premature interruptions during data collection scenarios.
149
+
150
+ ## Usage
151
+
152
+ The model is designed for use as a turn detection plugin within the [LiveKit Agents](https://github.com/livekit/agents) framework.
153
+
154
+ For complete installation instructions, code examples (Python and Node.js), and configuration options, see the **[LiveKit turn detector plugin documentation](https://docs.livekit.io/agents/logic/turns/turn-detector/)**.
155
+
156
+ For broader context on how turn detection fits into the voice pipeline — including VAD configuration, interruption handling, and manual turn control — see the **[Turns overview](https://docs.livekit.io/agents/logic/turns/)**.
157
+
158
+ ## Deployment Requirements
159
+
160
+ - **Runtime**: CPU-only (no GPU required). Uses [ONNX Runtime](https://onnxruntime.ai/) with the `CPUExecutionProvider`.
161
+ - **RAM**: <500 MB for the multilingual model.
162
+ - **Instance type**: Use compute-optimized instances (e.g., AWS c6i, c7i). Avoid burstable instances (e.g., AWS t3, t4g) to prevent inference timeouts from CPU credit exhaustion.
163
+ - **LiveKit Cloud**: The model is deployed globally on LiveKit Cloud. Agents running there automatically use the optimized remote inference service with no local resource requirements.
164
+
165
+ ## Limitations
166
+
167
+ - **Text-only input**: The model operates on STT-transcribed text and cannot incorporate prosodic cues such as pauses, intonation, or emphasis. Future versions may integrate multimodal audio features.
168
+ - **STT dependency**: Prediction quality depends on the accuracy and output format of the upstream STT provider. Mismatches between training and deployment STT formats may degrade performance.
169
+ - **Context window**: Limited to 128 tokens across a maximum of 6 conversation turns.
170
+ - **Language coverage**: Currently supports 14 languages. Performance on unsupported languages is undefined.
171
+ - **Realtime model compatibility**: Cannot be used with audio-native realtime models (e.g., OpenAI Realtime API) without adding a separate STT service, which incurs additional cost and latency.
172
+
173
+ ## License
174
+
175
+ This model is released under the [LiveKit Model License](./LICENSE).
176
+
177
+ ## Resources
178
+
179
+ - **[Documentation](https://docs.livekit.io/agents/logic/turns/turn-detector/)**: Full plugin documentation, installation, and integration guide.
180
+ - **[Turns Overview](https://docs.livekit.io/agents/logic/turns/)**: How turn detection fits into the LiveKit Agents voice pipeline.
181
+ - **[Blog: Improved End-of-Turn Model](https://blog.livekit.io/improved-end-of-turn-model-cuts-voice-ai-interruptions-39/)**: Technical deep dive on the multilingual distillation approach and benchmarks.
182
+ - **[Blog: Using a Transformer for Turn Detection](https://blog.livekit.io/using-a-transformer-to-improve-end-of-turn-detection/)**: Original blog post introducing the concept and architecture.
183
+ - **[Video: LiveKit Turn Detector](https://youtu.be/OZG0oZKctgw)**: Overview video demonstrating the plugin.
184
+ - **[GitHub: Plugin Source](https://github.com/livekit/agents/tree/main/livekit-plugins/livekit-plugins-turn-detector)**: Source code for the `livekit-plugins-turn-detector` package.
185
+ - **[PyPI](https://pypi.org/project/livekit-plugins-turn-detector/)** | **[npm](https://www.npmjs.com/package/@livekit/agents-plugin-livekit)**: Package registries.
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/added_tokens.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "<|assistant|>": 49153,
3
+ "<|user|>": 49152
4
+ }
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_attn_implementation_autoset": true,
3
+ "_name_or_path": "/tmp/tmpdhw4_gdh",
4
+ "architectures": [
5
+ "LlamaForCausalLM"
6
+ ],
7
+ "attention_bias": false,
8
+ "attention_dropout": 0.0,
9
+ "bos_token_id": 0,
10
+ "eos_token_id": 0,
11
+ "head_dim": 64,
12
+ "hidden_act": "silu",
13
+ "hidden_size": 576,
14
+ "initializer_range": 0.041666666666666664,
15
+ "intermediate_size": 1536,
16
+ "is_llama_config": true,
17
+ "max_position_embeddings": 8192,
18
+ "mlp_bias": false,
19
+ "model_type": "llama",
20
+ "num_attention_heads": 9,
21
+ "num_hidden_layers": 30,
22
+ "num_key_value_heads": 3,
23
+ "pretraining_tp": 1,
24
+ "rms_norm_eps": 1e-05,
25
+ "rope_interleaved": false,
26
+ "rope_scaling": null,
27
+ "rope_theta": 100000,
28
+ "tie_word_embeddings": true,
29
+ "transformers_version": "4.46.3",
30
+ "use_cache": true,
31
+ "vocab_size": 49154
32
+ }
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 0,
4
+ "eos_token_id": 0,
5
+ "transformers_version": "4.46.3"
6
+ }
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f7b4c93c1cdb6d1e858b01f63e5f0f15bdb979dd0d177e8996a244c80e03925
3
+ size 538095016
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/model_q8.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e685767c3643b0363c9f826a98325683f29e9c7d550162c8e8740ba33aa31aa
3
+ size 165035487
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e685767c3643b0363c9f826a98325683f29e9c7d550162c8e8740ba33aa31aa
3
+ size 165035487
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/ort_config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "one_external_file": true,
3
+ "opset": null,
4
+ "optimization": {},
5
+ "quantization": {
6
+ "activations_dtype": "QUInt8",
7
+ "activations_symmetric": false,
8
+ "format": "QOperator",
9
+ "is_static": false,
10
+ "mode": "IntegerOps",
11
+ "nodes_to_exclude": [],
12
+ "nodes_to_quantize": [],
13
+ "operators_to_quantize": [
14
+ "Conv",
15
+ "MatMul",
16
+ "Attention",
17
+ "LSTM",
18
+ "Gather",
19
+ "Transpose",
20
+ "EmbedLayerNormalization"
21
+ ],
22
+ "per_channel": false,
23
+ "qdq_add_pair_to_weight": false,
24
+ "qdq_dedicated_pair": false,
25
+ "qdq_op_type_per_channel_support_to_axis": {
26
+ "MatMul": 1
27
+ },
28
+ "reduce_range": false,
29
+ "weights_dtype": "QInt8",
30
+ "weights_symmetric": true
31
+ },
32
+ "use_external_data_format": false
33
+ }
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/special_tokens_map.json ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|user|>",
6
+ "<|assistant|>"
7
+ ],
8
+ "bos_token": {
9
+ "content": "<|endoftext|>",
10
+ "lstrip": false,
11
+ "normalized": false,
12
+ "rstrip": false,
13
+ "single_word": false
14
+ },
15
+ "eos_token": {
16
+ "content": "<|endoftext|>",
17
+ "lstrip": false,
18
+ "normalized": false,
19
+ "rstrip": false,
20
+ "single_word": false
21
+ },
22
+ "pad_token": {
23
+ "content": "<|endoftext|>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": false,
27
+ "single_word": false
28
+ },
29
+ "unk_token": {
30
+ "content": "<|endoftext|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false
35
+ }
36
+ }
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/tokenizer_config.json ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "<repo_name>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "4": {
37
+ "content": "<reponame>",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ },
44
+ "5": {
45
+ "content": "<file_sep>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false,
50
+ "special": true
51
+ },
52
+ "6": {
53
+ "content": "<filename>",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false,
58
+ "special": true
59
+ },
60
+ "7": {
61
+ "content": "<gh_stars>",
62
+ "lstrip": false,
63
+ "normalized": false,
64
+ "rstrip": false,
65
+ "single_word": false,
66
+ "special": true
67
+ },
68
+ "8": {
69
+ "content": "<issue_start>",
70
+ "lstrip": false,
71
+ "normalized": false,
72
+ "rstrip": false,
73
+ "single_word": false,
74
+ "special": true
75
+ },
76
+ "9": {
77
+ "content": "<issue_comment>",
78
+ "lstrip": false,
79
+ "normalized": false,
80
+ "rstrip": false,
81
+ "single_word": false,
82
+ "special": true
83
+ },
84
+ "10": {
85
+ "content": "<issue_closed>",
86
+ "lstrip": false,
87
+ "normalized": false,
88
+ "rstrip": false,
89
+ "single_word": false,
90
+ "special": true
91
+ },
92
+ "11": {
93
+ "content": "<jupyter_start>",
94
+ "lstrip": false,
95
+ "normalized": false,
96
+ "rstrip": false,
97
+ "single_word": false,
98
+ "special": true
99
+ },
100
+ "12": {
101
+ "content": "<jupyter_text>",
102
+ "lstrip": false,
103
+ "normalized": false,
104
+ "rstrip": false,
105
+ "single_word": false,
106
+ "special": true
107
+ },
108
+ "13": {
109
+ "content": "<jupyter_code>",
110
+ "lstrip": false,
111
+ "normalized": false,
112
+ "rstrip": false,
113
+ "single_word": false,
114
+ "special": true
115
+ },
116
+ "14": {
117
+ "content": "<jupyter_output>",
118
+ "lstrip": false,
119
+ "normalized": false,
120
+ "rstrip": false,
121
+ "single_word": false,
122
+ "special": true
123
+ },
124
+ "15": {
125
+ "content": "<jupyter_script>",
126
+ "lstrip": false,
127
+ "normalized": false,
128
+ "rstrip": false,
129
+ "single_word": false,
130
+ "special": true
131
+ },
132
+ "16": {
133
+ "content": "<empty_output>",
134
+ "lstrip": false,
135
+ "normalized": false,
136
+ "rstrip": false,
137
+ "single_word": false,
138
+ "special": true
139
+ },
140
+ "49152": {
141
+ "content": "<|user|>",
142
+ "lstrip": false,
143
+ "normalized": false,
144
+ "rstrip": false,
145
+ "single_word": false,
146
+ "special": true
147
+ },
148
+ "49153": {
149
+ "content": "<|assistant|>",
150
+ "lstrip": false,
151
+ "normalized": false,
152
+ "rstrip": false,
153
+ "single_word": false,
154
+ "special": true
155
+ }
156
+ },
157
+ "additional_special_tokens": [
158
+ "<|im_start|>",
159
+ "<|im_end|>",
160
+ "<|user|>",
161
+ "<|assistant|>"
162
+ ],
163
+ "bos_token": "<|endoftext|>",
164
+ "chat_template": "{% for message in messages %}{{'<|im_start|>' + '<|' + message['role'] + '|>' + message['content'] + '<|im_end|>'}}{% endfor %}",
165
+ "clean_up_tokenization_spaces": false,
166
+ "eos_token": "<|endoftext|>",
167
+ "model_max_length": 8192,
168
+ "pad_token": "<|endoftext|>",
169
+ "tokenizer_class": "GPT2Tokenizer",
170
+ "unk_token": "<|endoftext|>",
171
+ "vocab_size": 49152
172
+ }
livekit-turn-detector/snapshots/fba34c38ad5d30a63ebb83a9e6bf271cf4c91d67/vocab.json ADDED
The diff for this file is too large to render. See raw diff