Jashan887 committed on
Commit
112d755
·
verified ·
1 Parent(s): 4401cc9

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
37
+ banner.png filter=lfs diff=lfs merge=lfs -text
38
+ consortium.png filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to the Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by the Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding any notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ Copyright 2026 Lunit Inc.
179
+
180
+ Licensed under the Apache License, Version 2.0 (the "License");
181
+ you may not use this file except in compliance with the License.
182
+ You may obtain a copy of the License at
183
+
184
+ http://www.apache.org/licenses/LICENSE-2.0
185
+
186
+ Unless required by applicable law or agreed to in writing, software
187
+ distributed under the License is distributed on an "AS IS" BASIS,
188
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
189
+ See the License for the specific language governing permissions and
190
+ limitations under the License.
README.md ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ language:
4
+ - en
5
+ base_model:
6
+ - trillionlabs/Gravity-16B-A3B-Base
7
+ tags:
8
+ - medical
9
+ - clinical
10
+ - mixture-of-experts
11
+ - conversational
12
+ - sft
13
+ library_name: transformers
14
+ pipeline_tag: text-generation
15
+ ---
16
+
17
+ <p align="center">
18
+ <img src="banner.png" alt="L1" style="width: 80%;">
19
+ </p>
20
+
21
+ # Learning Unit 1
22
+
23
+ **L1** (Learning Unit 1) is the first language model from [Lunit](https://www.lunit.io) and Lunit Consortium, purpose-built for the medical domain. Derived from [Gravity-16B-A3B-Base](https://huggingface.co/trillionlabs/Gravity-16B-A3B-Base), L1 is designed for clinical reasoning and decision support.
24
+
25
+ ### ✨ Key Highlights
26
+ * 🩺 **Medical-Domain Specialized**: Developed specifically for clinical reasoning and medical decision support
27
+ * ⚡ **Efficient MoE**: Only 3B parameters active per token out of 16.24B total — fast inference with high capacity
28
+ * 💭 **Thinking Model**: Performs step-by-step reasoning in `<think>` tags before generating the final answer
29
+
30
+ > **Note:** L1 reasons internally using `<think>...</think>` blocks before producing a response. This chain-of-thought process improves answer quality but consumes additional tokens. Set `max_tokens` accordingly (recommended: 2048+).
31
+
32
+ ### 📋 Model Specifications
33
+
34
+ - Type: Causal Language Model
35
+ - Base Model: [Gravity-16B-A3B-Base](https://huggingface.co/trillionlabs/Gravity-16B-A3B-Base) from Trillion Labs and Lunit Consortium
36
+ - Architecture: GravityMoE (Sparse Mixture-of-Experts with MLA)
37
+ - Total Parameters: 16.24B
38
+ - Active Parameters: 3B
39
+ - Number of Layers: 28
40
+ - Attention Heads: 16
41
+ - KV Heads: 16
42
+ - Hidden Size: 2048
43
+ - MoE Intermediate Size: 1408
44
+ - Routed Experts: 64 (top-8 selection)
45
+ - Shared Experts: 1
46
+ - Context Length: 32,768 tokens
47
+ - Vocabulary Size: 151,552
48
+ - Tokenizer: GLM-4.5
49
+ - Precision: bf16
50
+
51
+ ## 🚀 Quickstart
52
+
53
+ ### SGLang (Recommended)
54
+
55
+ **Install:**
56
+ ```bash
57
+ pip install "sglang[all] @ git+https://github.com/trillion-labs/sglang-gravity.git#subdirectory=python"
58
+ ```
59
+
60
+ **Launch server:**
61
+ ```bash
62
+ python -m sglang.launch_server \
63
+ --model-path learning-unit/L1-16B-A3B \
64
+ --port 9006 --host 0.0.0.0 \
65
+ --tp 1 --dtype bfloat16 --trust-remote-code \
66
+ --attention-backend triton \
67
+ --moe-runner-backend triton
68
+ ```
69
+
70
+ **Query:**
71
+ ```bash
72
+ curl -X POST http://localhost:9006/v1/chat/completions \
73
+ -H "Content-Type: application/json" \
74
+ -d '{
75
+ "model": "learning-unit/L1-16B-A3B",
76
+ "messages": [
77
+ {"role": "user", "content": "What are the diagnostic criteria for sepsis?"}
78
+ ],
79
+ "max_tokens": 2048
80
+ }'
81
+ ```
82
+
83
+ ### Transformers
84
+
85
+ **Install:**
86
+ ```bash
87
+ pip install "transformers>=5.0" torch
88
+ ```
89
+
90
+ ```python
91
+ import torch
92
+ from transformers import AutoModelForCausalLM, AutoTokenizer
93
+
94
+ model_name = "learning-unit/L1-16B-A3B"
95
+
96
+ model = AutoModelForCausalLM.from_pretrained(
97
+ model_name,
98
+ torch_dtype=torch.bfloat16,
99
+ device_map="auto",
100
+ trust_remote_code=True,
101
+ )
102
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
103
+
104
+ messages = [
105
+ {"role": "user", "content": "What are the diagnostic criteria for sepsis?"}
106
+ ]
107
+ text = tokenizer.apply_chat_template(
108
+ messages,
109
+ tokenize=False,
110
+ add_generation_prompt=True,
111
+ )
112
+ model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
113
+
114
+ generated_ids = model.generate(
115
+ **model_inputs,
116
+ max_new_tokens=2048,
117
+ temperature=0.7,
118
+ do_sample=True,
119
+ )
120
+ generated_ids = [
121
+ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
122
+ ]
123
+
124
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
125
+ print(response)
126
+ ```
127
+
128
+ ## 💬 Examples
129
+
130
+ L1 is specialized for the medical domain and covers a wide range of clinical scenarios. Below are representative examples from real-world clinical use cases.
131
+
132
+ ### Medical Q&A
133
+
134
+ > A 45-year-old woman with lupus nephritis on mycophenolate and prednisone develops fever, dry cough, and bilateral ground-glass opacities on chest CT. Her CD4 count is 180. What is your differential diagnosis and recommended workup?
135
+
136
+ ### Patient Education
137
+
138
+ > I have diabetes and use insulin daily. What is the proper way to store insulin at home?
139
+
140
+ ### Clinical Documentation
141
+
142
+ > Please draft an overnight progress note. Patient labs: RBC 4.5, WBC 8. Vitals: HR 82, BP 118/76, RR 15, Temp 37.1. Nurse reports stable overnight. Plan: continue antibiotics, recheck labs in the morning.
143
+
144
+ ### Emergency Triage
145
+
146
+ > 다음 응급실 환자에 대해 KTAS triage를 수행하고, 초기 진단 및 감별진단을 제시해주세요. 78세 여성 환자가 119 구급차로 응급실에 내원했습니다. 22시경 갑자기 좌측 안면이 처지고 말이 어눌해지는 증상이 발생했습니다. 두통을 호소하며, 고혈압 병력이 있습니다. 활력징후는 혈압 172/88, 심박수 92, 호흡수 14, 체온 36.8, 산소포화도 98%이고 의식은 명료합니다. 사지 위약감은 없습니다.
147
+
148
+ ### Adverse Drug Reaction (ADR) Causality Assessment
149
+
150
+ > 다음 환자의 약물이상반응(ADR)에 대해 WHO-UMC 기준으로 인과관계를 평가해주세요. 80세 여성 환자가 기관지확장증으로 입원 중 moxifloxacin 400mg IV를 투여받았습니다. 투여 중 전신 피부 가려움이 새로 발생했고, 약물 중단 후 환자 본인도 가려움이 줄어드는 양상을 표현했으며 이후 회복되었습니다. 재투여는 시행하지 않았습니다. 기존 약물 알레르기력은 없고, 가려움을 유발할 만한 다른 병용약물이나 피부질환은 확인되지 않았습니다.
151
+
152
+ ## 📊 Benchmark
153
+
154
+ All benchmarks were evaluated using [CoEval](https://github.com/lunit-io/CoEval), Lunit's open-source medical LLM evaluation framework. Evaluations use greedy decoding (temperature=0). To reproduce these results:
155
+
156
+ ```bash
157
+ git clone https://github.com/lunit-io/CoEval.git
158
+ cd CoEval
159
+ ```
160
+
161
+ Refer to the [CoEval Quickstart](https://github.com/lunit-io/CoEval#quickstart) for setup and evaluation instructions.
162
+
163
+ ### MCQA Benchmarks
164
+
165
+ | Model | [PubMedQA](https://huggingface.co/datasets/qiaojin/PubMedQA) | [AttrBench](https://huggingface.co/datasets/osunlp/AttributionBench) | [MedQA](https://huggingface.co/datasets/GBaker/MedQA-USMLE-4-options) | [CareQA](https://huggingface.co/datasets/HPAI-BSC/CareQA) | [HeadQA](https://huggingface.co/datasets/alesi12/head_qa_v2) | [MedMCQA](https://huggingface.co/datasets/lighteval/med_mcqa) | [MMLU-Pro (Health)](https://huggingface.co/datasets/TIGER-Lab/MMLU-Pro) | [M-ARC](https://huggingface.co/datasets/mkieffer/M-ARC) | [MetaMedQA](https://huggingface.co/datasets/maximegmd/MetaMedQA) | [MedHallu](https://huggingface.co/datasets/UTAustin-AIHealth/MedHallu) | [MedCalc](https://huggingface.co/datasets/ncbi/MedCalc-Bench) | [MedBullets](https://huggingface.co/datasets/mkieffer/Medbullets) 4-opt | [MedBullets](https://huggingface.co/datasets/mkieffer/Medbullets) 5-opt | [MedXpertQA](https://huggingface.co/datasets/TsinghuaC3I/MedXpertQA)-R | [MedXpertQA](https://huggingface.co/datasets/TsinghuaC3I/MedXpertQA)-U | W.Avg |
166
+ |:---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
167
+ | GPT-OSS-120B | 78.00 | 76.10 | 91.10 | 91.00 | 88.40 | 74.80 | 74.60 | 40.00 | 76.50 | 83.50 | 30.30 | 84.70 | 82.10 | 35.60 | 32.90 | 79.43 |
168
+ | GPT-OSS-20B | 75.80 | 74.80 | 83.90 | 84.80 | 83.30 | 65.40 | 70.50 | 31.00 | 70.10 | 81.30 | 29.20 | 73.40 | 70.50 | 24.70 | 21.20 | 73.38 |
169
+ | Qwen3.5-122B | 76.40 | 55.68 | 87.80 | 86.40 | 84.00 | 74.40 | 73.00 | 59.00 | 73.90 | 37.50 | 53.70 | 79.20 | 79.50 | 35.90 | 35.30 | 75.08 |
170
+ | MedGemma-27B | 73.40 | 74.80 | 84.40 | 85.00 | 83.80 | 71.90 | 73.00 | 48.00 | 69.60 | 81.40 | 24.10 | 73.70 | 68.80 | 19.10 | 20.50 | 73.99 |
171
+ | Gemma4-26B-A4B | 76.40 | 72.00 | 81.80 | 84.50 | 82.30 | 67.30 | 73.50 | 67.00 | 71.50 | 86.50 | 45.60 | 73.70 | 67.50 | 45.10 | 39.20 | 75.34 |
172
+ | L1-16B-A3B | 84.20 | 78.40 | 85.50 | 88.20 | 85.80 | 76.70 | 74.90 | 82.00 | 73.10 | 76.10 | 43.90 | 78.90 | 70.80 | 27.50 | 29.20 | 77.74 |
173
+
174
+ ### Chat Task
175
+
176
+ | Model | [HealthBench-Consensus](https://github.com/openai/simple-evals) |
177
+ |:---|:---:|
178
+ | GPT-OSS-120B | 90.60 |
179
+ | GPT-OSS-20B | 78.70 |
180
+ | Qwen3.5-122B | 92.20 |
181
+ | MedGemma-27B | 90.70 |
182
+ | Gemma4-26B-A4B | 92.60 |
183
+ | L1-16B-A3B | 93.50 |
184
+
185
+ ## 📝 Citation
186
+
187
+ ```bibtex
188
+ @misc{lunit2026l1,
189
+ title={L1: The First Clinical Language Model by Lunit},
190
+ author={Lunit},
191
+ year={2026},
192
+ url={https://huggingface.co/learning-unit/L1-16B-A3B}
193
+ }
194
+ ```
195
+
196
+ ## ⚠️ Limitations
197
+
198
+ - **Not a substitute for professional medical judgment.** L1 may generate factually incorrect, incomplete, or outdated clinical information. All outputs should be verified by qualified healthcare professionals.
199
+ - **Thinking overhead.** Chain-of-thought reasoning in `<think>` tags increases token consumption and latency compared to non-thinking models of similar size.
200
+ - **Context length.** Maximum context length is 32,768 tokens.
201
+ - **No real-time knowledge.** The model's knowledge is limited to its training data cutoff and does not reflect the latest medical guidelines or drug approvals.
202
+
203
+ ## 🤝 Acknowledgements
204
+
205
+ This work was supported by the Domain-Specific Foundation Model Project (인공지능 특화 파운데이션 모델 프로젝트), funded by the Ministry of Science and ICT (과학기술정보통신부) and managed by the National IT Industry Promotion Agency (NIPA).
206
+
207
+ L1 is a collaborative effort by the following consortium members:
208
+
209
+ **Industry**
210
+ - Lunit
211
+ - Trillion Labs
212
+ - SK Biopharmaceuticals
213
+ - Kakao Healthcare
214
+ - AIGEN Sciences
215
+ - D-Circle
216
+ - Rebellions
217
+ - Standigm
218
+
219
+ **Academia**
220
+ - Prof. Choi Yun-jae's Lab from KAIST
221
+ - Prof. Hong Seung-hoon's Lab from KAIST
222
+ - Prof. Jung Yu-seong's Lab from SNU
223
+ - Prof. Kim Hyun-woo's Lab from KAIST
224
+ - Prof. Kim Tae-gyun's Lab from KAIST
225
+ - Prof. Ye Jong-cheol's Lab from KAIST
226
+
227
+ **Hospitals**
228
+ - NHIS Ilsan Hospital
229
+ - Ewha Womans University Seoul Hospital
230
+ - Keimyung University Dongsan Medical Center
231
+ - Konyang University Hospital
232
+ - Korea University Research & Business Foundation
233
+ - Kyung Hee University Hospital at Gangdong
234
+ - Kyung Hee University Medical Center
235
+ - Pusan National University Yangsan Hospital
236
+ - Yongin Severance Hospital
237
+
238
+ <p align="center">
239
+ <img src="consortium.png" alt="Consortium Members" style="width: 80%;">
240
+ </p>
241
+
242
+ ## 📄 License
243
+
244
+ This model is licensed under the [Apache 2.0 License](LICENSE).
245
+
246
+ ## 📬 Contact
247
+
248
+ - Taesoo Kim (김태수) — [taesoo.kim@lunit.io](mailto:taesoo.kim@lunit.io)
249
+ - Donggeun Yoo (유동근) — [dgyoo@lunit.io](mailto:dgyoo@lunit.io)
banner.png ADDED

Git LFS Details

  • SHA256: 6b2787cddb60f574b00fc3be985850ee03acdf7fcc80b5f8ca91ae6093afc321
  • Pointer size: 131 Bytes
  • Size of remote file: 130 kB
chat_template.jinja ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [gMASK]<sop>
2
+ {%- if tools -%}
3
+ <|system|>
4
+ # Tools
5
+
6
+ You may call one or more functions to assist with the user query.
7
+
8
+ You are provided with function signatures within <tools></tools> XML tags:
9
+ <tools>
10
+ {% for tool in tools %}
11
+ {{ tool | tojson(ensure_ascii=False) }}
12
+ {% endfor %}
13
+ </tools>
14
+
15
+ For each function call, output the function name and arguments within the following XML format:
16
+ <tool_call>{function-name}
17
+ <arg_key>{arg-key-1}</arg_key>
18
+ <arg_value>{arg-value-1}</arg_value>
19
+ <arg_key>{arg-key-2}</arg_key>
20
+ <arg_value>{arg-value-2}</arg_value>
21
+ ...
22
+ </tool_call>{%- endif -%}
23
+ {%- macro visible_text(content) -%}
24
+ {%- if content is string -%}
25
+ {{- content }}
26
+ {%- elif content is iterable and content is not mapping -%}
27
+ {%- for item in content -%}
28
+ {%- if item is mapping and item.type == 'text' -%}
29
+ {{- item.text }}
30
+ {%- elif item is string -%}
31
+ {{- item }}
32
+ {%- endif -%}
33
+ {%- endfor -%}
34
+ {%- else -%}
35
+ {{- content }}
36
+ {%- endif -%}
37
+ {%- endmacro -%}
38
+ {%- set ns = namespace(last_user_index=-1) %}
39
+ {%- for m in messages %}
40
+ {%- if m.role == 'user' %}
41
+ {% set ns.last_user_index = loop.index0 -%}
42
+ {%- endif %}
43
+ {%- endfor %}
44
+ {% for m in messages %}
45
+ {%- if m.role == 'user' -%}<|user|>
46
+ {{ visible_text(m.content) }}
47
+ {{- '/nothink' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("/nothink")) else '' -}}
48
+ {%- elif m.role == 'assistant' -%}
49
+ <|assistant|>
50
+ {%- set reasoning_content = '' %}
51
+ {%- set content = visible_text(m.content) %}
52
+ {%- if m.reasoning_content is string %}
53
+ {%- set reasoning_content = m.reasoning_content %}
54
+ {%- else %}
55
+ {%- if '</think>' in content %}
56
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
57
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
58
+ {%- endif %}
59
+ {%- endif %}
60
+ {%- if reasoning_content -%}
61
+ {{ '\n<think>' + reasoning_content.strip() + '</think>'}}
62
+ {%- else -%}
63
+ {{ '\n<think></think>' }}
64
+ {%- endif -%}
65
+ {%- if content.strip() -%}
66
+ {{ '\n' + content.strip() }}
67
+ {%- endif -%}
68
+ {% if m.tool_calls %}
69
+ {% for tc in m.tool_calls %}
70
+ {%- if tc.function %}
71
+ {%- set tc = tc.function %}
72
+ {%- endif %}
73
+ {{ '\n<tool_call>' + tc.name }}
74
+ {% set _args = tc.arguments %}
75
+ {% for k, v in _args.items() %}
76
+ <arg_key>{{ k }}</arg_key>
77
+ <arg_value>{{ v | tojson(ensure_ascii=False) if v is not string else v }}</arg_value>
78
+ {% endfor %}
79
+ </tool_call>{% endfor %}
80
+ {% endif %}
81
+ {%- elif m.role == 'tool' -%}
82
+ {%- if m.content is string -%}
83
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
84
+ {{- '<|observation|>' }}
85
+ {%- endif %}
86
+ {{- '\n<tool_response>\n' }}
87
+ {{- m.content }}
88
+ {{- '\n</tool_response>' }}
89
+ {%- else -%}
90
+ <|observation|>{% for tr in m.content %}
91
+
92
+ <tool_response>
93
+ {{ tr.output if tr.output is defined else tr }}
94
+ </tool_response>{% endfor -%}
95
+ {% endif -%}
96
+ {%- elif m.role == 'system' -%}
97
+ <|system|>
98
+ {{ visible_text(m.content) }}
99
+ {%- endif -%}
100
+ {%- endfor -%}
101
+ {%- if add_generation_prompt -%}
102
+ <|assistant|>{{- '\n<think></think>' if (enable_thinking is defined and not enable_thinking) else '' -}}
103
+ {%- endif -%}
config.json ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "GravityMoEForCausalLM"
4
+ ],
5
+ "model_type": "gravity_moe",
6
+ "auto_map": {
7
+ "AutoConfig": "configuration_gravity_moe.GravityMoEConfig",
8
+ "AutoModelForCausalLM": "modeling_gravity_moe.GravityMoEForCausalLM"
9
+ },
10
+ "vocab_size": 151552,
11
+ "hidden_size": 2048,
12
+ "intermediate_size": 8192,
13
+ "moe_intermediate_size": 1408,
14
+ "num_hidden_layers": 28,
15
+ "num_attention_heads": 16,
16
+ "num_key_value_heads": 16,
17
+ "q_lora_rank": null,
18
+ "kv_lora_rank": 512,
19
+ "qk_rope_head_dim": 64,
20
+ "qk_nope_head_dim": 128,
21
+ "v_head_dim": 128,
22
+ "n_routed_experts": 64,
23
+ "n_shared_experts": 1,
24
+ "num_experts_per_tok": 8,
25
+ "first_k_dense_replace": 1,
26
+ "moe_layer_freq": 1,
27
+ "routed_scaling_factor": 2.446,
28
+ "norm_topk_prob": true,
29
+ "scoring_func": "sigmoid",
30
+ "topk_method": "noaux_tc",
31
+ "n_group": 1,
32
+ "topk_group": 1,
33
+ "hidden_act": "silu",
34
+ "max_position_embeddings": 32768,
35
+ "initializer_range": 0.02,
36
+ "rms_norm_eps": 1e-06,
37
+ "use_cache": true,
38
+ "rope_theta": 1000000.0,
39
+ "rope_scaling": null,
40
+ "attention_bias": false,
41
+ "attention_dropout": 0.0,
42
+ "tie_word_embeddings": false,
43
+ "bos_token_id": null,
44
+ "eos_token_id": 151329,
45
+ "torch_dtype": "bfloat16",
46
+ "pad_token_id": 151329
47
+ }
configuration_gravity_moe.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2026 Trillion Labs and the HuggingFace Inc. team. All rights reserved.
2
+
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """GravityMoE model configuration — inherits from DeepSeek V3."""
15
+
16
+ from transformers import DeepseekV3Config
17
+
18
+
19
+ class GravityMoEConfig(DeepseekV3Config):
20
+ r"""
21
+ Configuration class for the GravityMoE model, inheriting from
22
+ [`DeepseekV3Config`]. GravityMoE shares the same architecture as
23
+ DeepSeek V3 (sparse MoE with MLA) but uses different hyperparameters.
24
+
25
+ Only default values that differ from DeepSeek V3 are overridden here.
26
+ See [`DeepseekV3Config`] for full documentation of all parameters.
27
+
28
+ Example:
29
+
30
+ ```python
31
+ >>> from configuration_gravity_moe import GravityMoEConfig
32
+
33
+ >>> configuration = GravityMoEConfig()
34
+ >>> configuration.model_type
35
+ 'gravity_moe'
36
+ ```
37
+ """
38
+
39
+ model_type = "gravity_moe"
40
+
41
+ def __init__(
42
+ self,
43
+ vocab_size=151552,
44
+ hidden_size=2048,
45
+ intermediate_size=8192,
46
+ moe_intermediate_size=1408,
47
+ num_hidden_layers=28,
48
+ num_attention_heads=16,
49
+ num_key_value_heads=16,
50
+ n_shared_experts=1,
51
+ n_routed_experts=64,
52
+ routed_scaling_factor=2.446,
53
+ kv_lora_rank=512,
54
+ q_lora_rank=None,
55
+ qk_rope_head_dim=64,
56
+ v_head_dim=128,
57
+ qk_nope_head_dim=128,
58
+ n_group=1,
59
+ topk_group=1,
60
+ num_experts_per_tok=8,
61
+ first_k_dense_replace=1,
62
+ norm_topk_prob=True,
63
+ hidden_act="silu",
64
+ max_position_embeddings=65536,
65
+ initializer_range=0.02,
66
+ rms_norm_eps=1e-6,
67
+ use_cache=True,
68
+ pad_token_id=None,
69
+ bos_token_id=0,
70
+ eos_token_id=1,
71
+ tie_word_embeddings=False,
72
+ rope_theta=1000000.0,
73
+ rope_scaling=None,
74
+ rope_interleave=True,
75
+ attention_bias=False,
76
+ attention_dropout=0.0,
77
+ **kwargs,
78
+ ):
79
+ super().__init__(
80
+ vocab_size=vocab_size,
81
+ hidden_size=hidden_size,
82
+ intermediate_size=intermediate_size,
83
+ moe_intermediate_size=moe_intermediate_size,
84
+ num_hidden_layers=num_hidden_layers,
85
+ num_attention_heads=num_attention_heads,
86
+ num_key_value_heads=num_key_value_heads,
87
+ n_shared_experts=n_shared_experts,
88
+ n_routed_experts=n_routed_experts,
89
+ routed_scaling_factor=routed_scaling_factor,
90
+ kv_lora_rank=kv_lora_rank,
91
+ q_lora_rank=q_lora_rank,
92
+ qk_rope_head_dim=qk_rope_head_dim,
93
+ v_head_dim=v_head_dim,
94
+ qk_nope_head_dim=qk_nope_head_dim,
95
+ n_group=n_group,
96
+ topk_group=topk_group,
97
+ num_experts_per_tok=num_experts_per_tok,
98
+ first_k_dense_replace=first_k_dense_replace,
99
+ norm_topk_prob=norm_topk_prob,
100
+ hidden_act=hidden_act,
101
+ max_position_embeddings=max_position_embeddings,
102
+ initializer_range=initializer_range,
103
+ rms_norm_eps=rms_norm_eps,
104
+ use_cache=use_cache,
105
+ pad_token_id=pad_token_id,
106
+ bos_token_id=bos_token_id,
107
+ eos_token_id=eos_token_id,
108
+ tie_word_embeddings=tie_word_embeddings,
109
+ rope_theta=rope_theta,
110
+ rope_scaling=rope_scaling,
111
+ rope_interleave=rope_interleave,
112
+ attention_bias=attention_bias,
113
+ attention_dropout=attention_dropout,
114
+ **kwargs,
115
+ )
116
+
117
+
118
+ __all__ = ["GravityMoEConfig"]
consortium.png ADDED

Git LFS Details

  • SHA256: 6c9b7a3ef909a9c183ac56117d38faecd7ea99151dab94353225e35f38309dd6
  • Pointer size: 131 Bytes
  • Size of remote file: 752 kB
generation_config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "eos_token_id": [
4
+ 151329,
5
+ 151329,
6
+ 151336,
7
+ 151338
8
+ ],
9
+ "pad_token_id": 151329,
10
+ "transformers_version": "5.3.0"
11
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0f50ca6769b69a3c0e5d35e39ffdf439116bec31646d8e7dde5da620e7e2395
3
+ size 32485059448
modeling_gravity_moe.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2026 Trillion Labs and the HuggingFace Inc. team. All rights reserved.
2
+
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ GravityMoE model — inherits from DeepSeek V3.
16
+
17
+ GravityMoE shares the same sparse Mixture-of-Experts architecture as DeepSeek V3
18
+ (MLA attention, sigmoid routing with bias correction, shared + routed experts)
19
+ but with different model hyperparameters. All modeling logic is inherited from
20
+ the DeepSeek V3 implementation in `transformers`.
21
+ """
22
+
23
+ from transformers.conversion_mapping import _MODEL_TO_CONVERSION_PATTERN
24
+ from transformers.models.deepseek_v3.modeling_deepseek_v3 import (
25
+ DeepseekV3ForCausalLM,
26
+ DeepseekV3Model,
27
+ DeepseekV3PreTrainedModel,
28
+ )
29
+
30
+ from .configuration_gravity_moe import GravityMoEConfig
31
+
32
+ # Register weight conversion so that from_pretrained fuses per-expert
33
+ # checkpoint weights (experts.*.gate_proj, etc.) into 3D tensors
34
+ # (experts.gate_up_proj, experts.down_proj), same as DeepSeek V3.
35
+ _MODEL_TO_CONVERSION_PATTERN["gravity_moe"] = "qwen2_moe"
36
+
37
+
38
+ class GravityMoEPreTrainedModel(DeepseekV3PreTrainedModel):
39
+ config_class = GravityMoEConfig
40
+ _keep_in_fp32_modules_strict = ["e_score_correction_bias"]
41
+ _keys_to_ignore_on_load_unexpected = [r"model\.layers\.28.*"]
42
+
43
+
44
+ class GravityMoEModel(DeepseekV3Model):
45
+ config_class = GravityMoEConfig
46
+
47
+
48
+ class GravityMoEForCausalLM(DeepseekV3ForCausalLM):
49
+ config_class = GravityMoEConfig
50
+
51
+
52
+ __all__ = [
53
+ "GravityMoEPreTrainedModel",
54
+ "GravityMoEModel",
55
+ "GravityMoEForCausalLM",
56
+ ]
special_tokens_map.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|endoftext|>",
4
+ "[MASK]",
5
+ "[gMASK]",
6
+ "[sMASK]",
7
+ "<sop>",
8
+ "<eop>",
9
+ "<|system|>",
10
+ "<|user|>",
11
+ "<|assistant|>",
12
+ "<|observation|>",
13
+ "<|begin_of_image|>",
14
+ "<|end_of_image|>",
15
+ "<|begin_of_video|>",
16
+ "<|end_of_video|>",
17
+ "<|begin_of_audio|>",
18
+ "<|end_of_audio|>",
19
+ "<|begin_of_transcription|>",
20
+ "<|end_of_transcription|>",
21
+ "<|code_prefix|>",
22
+ "<|code_middle|>",
23
+ "<|code_suffix|>",
24
+ "/nothink"
25
+ ],
26
+ "eos_token": {
27
+ "content": "<|endoftext|>",
28
+ "lstrip": false,
29
+ "normalized": false,
30
+ "rstrip": false,
31
+ "single_word": false
32
+ },
33
+ "pad_token": {
34
+ "content": "<|endoftext|>",
35
+ "lstrip": false,
36
+ "normalized": false,
37
+ "rstrip": false,
38
+ "single_word": false
39
+ }
40
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bda8e2146c3bb7b7e0fc96dcc4f0aeff041c6c27952e3ace0665663ebff346ba
3
+ size 19970700
tokenizer_config.json ADDED
@@ -0,0 +1,325 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "151329": {
4
+ "content": "<|endoftext|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "151330": {
12
+ "content": "[MASK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "151331": {
20
+ "content": "[gMASK]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "151332": {
28
+ "content": "[sMASK]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "151333": {
36
+ "content": "<sop>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "151334": {
44
+ "content": "<eop>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "151335": {
52
+ "content": "<|system|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "151336": {
60
+ "content": "<|user|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "151337": {
68
+ "content": "<|assistant|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "151338": {
76
+ "content": "<|observation|>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "151339": {
84
+ "content": "<|begin_of_image|>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "151340": {
92
+ "content": "<|end_of_image|>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "151341": {
100
+ "content": "<|begin_of_video|>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "151342": {
108
+ "content": "<|end_of_video|>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "151343": {
116
+ "content": "<|begin_of_audio|>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "151344": {
124
+ "content": "<|end_of_audio|>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "151345": {
132
+ "content": "<|begin_of_transcription|>",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "151346": {
140
+ "content": "<|end_of_transcription|>",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ },
147
+ "151347": {
148
+ "content": "<|code_prefix|>",
149
+ "lstrip": false,
150
+ "normalized": false,
151
+ "rstrip": false,
152
+ "single_word": false,
153
+ "special": true
154
+ },
155
+ "151348": {
156
+ "content": "<|code_middle|>",
157
+ "lstrip": false,
158
+ "normalized": false,
159
+ "rstrip": false,
160
+ "single_word": false,
161
+ "special": true
162
+ },
163
+ "151349": {
164
+ "content": "<|code_suffix|>",
165
+ "lstrip": false,
166
+ "normalized": false,
167
+ "rstrip": false,
168
+ "single_word": false,
169
+ "special": true
170
+ },
171
+ "151350": {
172
+ "content": "<think>",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false,
177
+ "special": false
178
+ },
179
+ "151351": {
180
+ "content": "</think>",
181
+ "lstrip": false,
182
+ "normalized": false,
183
+ "rstrip": false,
184
+ "single_word": false,
185
+ "special": false
186
+ },
187
+ "151352": {
188
+ "content": "<tool_call>",
189
+ "lstrip": false,
190
+ "normalized": false,
191
+ "rstrip": false,
192
+ "single_word": false,
193
+ "special": false
194
+ },
195
+ "151353": {
196
+ "content": "</tool_call>",
197
+ "lstrip": false,
198
+ "normalized": false,
199
+ "rstrip": false,
200
+ "single_word": false,
201
+ "special": false
202
+ },
203
+ "151354": {
204
+ "content": "<tool_response>",
205
+ "lstrip": false,
206
+ "normalized": false,
207
+ "rstrip": false,
208
+ "single_word": false,
209
+ "special": false
210
+ },
211
+ "151355": {
212
+ "content": "</tool_response>",
213
+ "lstrip": false,
214
+ "normalized": false,
215
+ "rstrip": false,
216
+ "single_word": false,
217
+ "special": false
218
+ },
219
+ "151356": {
220
+ "content": "<arg_key>",
221
+ "lstrip": false,
222
+ "normalized": false,
223
+ "rstrip": false,
224
+ "single_word": false,
225
+ "special": false
226
+ },
227
+ "151357": {
228
+ "content": "</arg_key>",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false,
233
+ "special": false
234
+ },
235
+ "151358": {
236
+ "content": "<arg_value>",
237
+ "lstrip": false,
238
+ "normalized": false,
239
+ "rstrip": false,
240
+ "single_word": false,
241
+ "special": false
242
+ },
243
+ "151359": {
244
+ "content": "</arg_value>",
245
+ "lstrip": false,
246
+ "normalized": false,
247
+ "rstrip": false,
248
+ "single_word": false,
249
+ "special": false
250
+ },
251
+ "151360": {
252
+ "content": "/nothink",
253
+ "lstrip": false,
254
+ "normalized": false,
255
+ "rstrip": false,
256
+ "single_word": false,
257
+ "special": true
258
+ },
259
+ "151361": {
260
+ "content": "<|begin_of_box|>",
261
+ "lstrip": false,
262
+ "normalized": false,
263
+ "rstrip": false,
264
+ "single_word": false,
265
+ "special": false
266
+ },
267
+ "151362": {
268
+ "content": "<|end_of_box|>",
269
+ "lstrip": false,
270
+ "normalized": false,
271
+ "rstrip": false,
272
+ "single_word": false,
273
+ "special": false
274
+ },
275
+ "151363": {
276
+ "content": "<|image|>",
277
+ "lstrip": false,
278
+ "normalized": false,
279
+ "rstrip": false,
280
+ "single_word": false,
281
+ "special": false
282
+ },
283
+ "151364": {
284
+ "content": "<|video|>",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false,
289
+ "special": false
290
+ }
291
+ },
292
+ "additional_special_tokens": [
293
+ "<|endoftext|>",
294
+ "[MASK]",
295
+ "[gMASK]",
296
+ "[sMASK]",
297
+ "<sop>",
298
+ "<eop>",
299
+ "<|system|>",
300
+ "<|user|>",
301
+ "<|assistant|>",
302
+ "<|observation|>",
303
+ "<|begin_of_image|>",
304
+ "<|end_of_image|>",
305
+ "<|begin_of_video|>",
306
+ "<|end_of_video|>",
307
+ "<|begin_of_audio|>",
308
+ "<|end_of_audio|>",
309
+ "<|begin_of_transcription|>",
310
+ "<|end_of_transcription|>",
311
+ "<|code_prefix|>",
312
+ "<|code_middle|>",
313
+ "<|code_suffix|>",
314
+ "/nothink"
315
+ ],
316
+ "clean_up_tokenization_spaces": false,
317
+ "do_lower_case": false,
318
+ "eos_token": "<|endoftext|>",
319
+ "extra_special_tokens": {},
320
+ "model_max_length": 128000,
321
+ "pad_token": "<|endoftext|>",
322
+ "padding_side": "left",
323
+ "remove_space": false,
324
+ "tokenizer_class": "PreTrainedTokenizerFast"
325
+ }