LG-AI-EXAONE committed on
Commit
acb250c
·
0 Parent(s):

Initial commit

Browse files
.gitattributes ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ assets/EXAONE_Symbol+BI_3d.png filter=lfs diff=lfs merge=lfs -text
37
+ assets/exaone45_input1.jpg filter=lfs diff=lfs merge=lfs -text
38
+ assets/exaone45_input2.png filter=lfs diff=lfs merge=lfs -text
39
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ EXAONE AI Model License Agreement 1.2 - NC
2
+
3
+ This License Agreement (“Agreement”) is entered into between you (“Licensee”) and LG Management Development
4
+ Institute Co., Ltd. (“Licensor”), governing the use of the EXAONE AI Model (“Model”). By downloading,
5
+ installing, copying, or using the Model, you agree to comply with and be bound by the terms of this Agreement.
6
+ If you do not agree to all the terms, you must not download, install, copy, or use the Model. This Agreement
7
+ constitutes a binding legal agreement between the Licensee and Licensor.
8
+
9
+ 1. Definitions
10
+ 1.1 Model: The artificial intelligence model provided by Licensor, which includes any software,
11
+ algorithms, machine learning models, or related components supplied by Licensor. This definition extends
12
+ to encompass all updates, enhancements, improvements, bug fixes, patches, or other modifications that may
13
+ be provided by Licensor from time to time, whether automatically or manually implemented.
14
+ 1.2 Derivatives: Any modifications, alterations, enhancements, improvements, adaptations, or derivative
15
+ works of the Model created by Licensee or any third party. This includes changes made to the Model's
16
+ architecture, parameters, data processing methods, or any other aspect of the Model that results in a
17
+ modification of its functionality or output.
18
+ 1.3 Output: Any data, results, content, predictions, analyses, insights, or other materials generated by
19
+ the Model or Derivatives, regardless of whether they are in their original form or have been further
20
+ processed or modified by the Licensee. This includes, but is not limited to, textual or numerical content produced
21
+ directly or indirectly through the use of the Model.
22
+ 1.4 Licensor: LG Management Development Institute Co., Ltd., the owner, developer, and provider of the
23
+ EXAONE AI Model. The Licensor holds all rights, title, and interest in the Model and is responsible for
24
+ granting licenses to use the Model under the terms specified in this Agreement.
25
+ 1.5 Licensee: The individual, organization, corporation, academic institution, government agency, or other
26
+ entity using or intending to use the Model under the terms and conditions of this Agreement. The Licensee
27
+ is responsible for ensuring compliance with the Agreement by all authorized users who access or utilize
28
+ the Model on behalf of the Licensee.
29
+
30
+ 2. License Grant
31
+ 2.1 Grant of License: Subject to the terms and conditions outlined in this Agreement, the Licensor hereby
32
+ grants the Licensee a limited, non-exclusive, non-transferable, worldwide, and revocable license to:
33
+ a. Access, download, install, and use the Model solely for research and educational purposes. This
34
+ includes evaluation, testing, academic research, experimentation, learning, teaching, training and
35
+ participation in competitions, provided that such participation is in a non-commercial context.
36
+ Notwithstanding Section 3.1, the Licensee may only provide the Model or Derivatives for a competition
37
+ if no commercial license is granted to the competition organizer or any third party.
38
+ b. Publicly disclose research results and findings derived from the use of the Model or Derivatives,
39
+ including publishing papers or presentations.
40
+ c. Modify the Model and create Derivatives based on the Model, provided that such modifications and
41
+ Derivatives are used exclusively for research and educational purposes. The Licensee may conduct
42
+ experiments, perform analyses, and apply custom modifications to the Model to explore its capabilities
43
+ and performance under various scenarios. If the Model is modified, the modified Model must include
44
+ "EXAONE" at the beginning of its name.
45
+ d. Distribute the Model and Derivatives in each case with a copy of this Agreement.
46
+ 2.2 Scope of License: The license granted herein does not authorize the Licensee to use the Model for any
47
+ purpose not explicitly permitted under this Agreement. Any use beyond the scope of this license, including
48
+ any commercial application or external distribution, is strictly prohibited unless explicitly agreed upon
49
+ in writing by the Licensor.
50
+
51
+ 3. Restrictions
52
+ 3.1 Commercial Use: The Licensee is expressly prohibited from using the Model, Derivatives, or Output for
53
+ any commercial purposes, including but not limited to, developing or deploying products, services, or
54
+ applications that generate revenue, whether directly or indirectly. Any commercial exploitation of the
55
+ Model or its derivatives requires a separate commercial license agreement with the Licensor. Furthermore,
56
+ the Licensee shall not use the Model, Derivatives or Output to develop or improve any models that compete
57
+ with the Licensor’s models.
58
+ 3.2 Reverse Engineering: The Licensee shall not decompile, disassemble, reverse engineer, or attempt to
59
+ derive the source code, underlying ideas, algorithms, or structure of the Model, except to the extent that
60
+ such activities are expressly permitted by applicable law. Any attempt to bypass or circumvent
61
+ technological protection measures applied to the Model is strictly prohibited.
62
+ 3.3 Unlawful Use: The Licensee shall not use the Model and Derivatives for any illegal, fraudulent, or
63
+ unauthorized activities, nor for any purpose that violates applicable laws or regulations. This includes
64
+ but is not limited to the creation, distribution, or dissemination of malicious, deceptive, or unlawful
65
+ content.
66
+ 3.4 Ethical Use: The Licensee shall ensure that the Model or Derivatives is used in an ethical and
67
+ responsible manner, adhering to the following guidelines:
68
+ a. The Model and Derivatives shall not be used to generate, propagate, or amplify false, misleading,
69
+ or harmful information, including fake news, misinformation, or disinformation.
70
+ b. The Model and Derivatives shall not be employed to create, distribute, or promote content that is
71
+ discriminatory, harassing, defamatory, abusive, or otherwise offensive to individuals or groups based
72
+ on race, gender, sexual orientation, religion, nationality, or other protected characteristics.
73
+ c. The Model and Derivatives shall not infringe on the rights of others, including intellectual property
74
+ rights, privacy rights, or any other rights recognized by law. The Licensee shall obtain all necessary
75
+ permissions and consents before using the Model and Derivatives in a manner that may impact the rights
76
+ of third parties.
77
+ d. The Model and Derivatives shall not be used in a way that causes harm, whether physical, mental,
78
+ emotional, or financial, to individuals, organizations, or communities. The Licensee shall take all
79
+ reasonable measures to prevent misuse or abuse of the Model and Derivatives that could result in harm
80
+ or injury.
81
+
82
+ 4. Ownership
83
+ 4.1 Intellectual Property: All rights, title, and interest in and to the Model, including any
84
+ modifications, Derivatives, and associated documentation, are and shall remain the exclusive property of
85
+ the Licensor. The Licensee acknowledges that this Agreement does not transfer any ownership rights to the
86
+ Licensee. All trademarks, service marks, and logos associated with the Model are the property of the
87
+ Licensor.
88
+ 4.2 Output: Licensor claims no rights in Output. Licensee is solely responsible for the Output and its use.
89
+ 4.3 Attribution: In any publication or presentation of results obtained using the Model, the Licensee
90
+ shall provide appropriate attribution to the Licensor, citing the Model's name and version, along with any
91
+ relevant documentation or references specified by the Licensor.
92
+
93
+ 5. No Warranty
94
+ 5.1 “As-Is” Basis: The Model, Derivatives, and Output are provided on an “as-is” and “as-available” basis,
95
+ without any warranties or representations of any kind, whether express, implied, or statutory. The Licensor
96
+ disclaims all warranties, including but not limited to, implied warranties of merchantability, fitness for
97
+ a particular purpose, accuracy, reliability, non-infringement, or any warranty arising from the course of
98
+ dealing or usage of trade.
99
+ 5.2 Performance and Reliability: The Licensor does not warrant or guarantee that the Model, Derivatives or
100
+ Output will meet the Licensee’s requirements, that the operation of the Model, Derivatives or Output will
101
+ be uninterrupted or error-free, or that defects in the Model will be corrected. The Licensee acknowledges
102
+ that the use of the Model, Derivatives or Output is at its own risk and that the Model, Derivatives or
103
+ Output may contain bugs, errors, or other limitations.
104
+ 5.3 No Endorsement: The Licensor does not endorse, approve, or certify any results, conclusions, or
105
+ recommendations derived from the use of the Model. The Licensee is solely responsible for evaluating the
106
+ accuracy, reliability, and suitability of the Model for its intended purposes.
107
+
108
+ 6. Limitation of Liability
109
+ 6.1 No Liability for Damages: To the fullest extent permitted by applicable law, in no event shall the
110
+ Licensor be liable for any special, incidental, indirect, consequential, exemplary, or punitive damages,
111
+ including but not limited to, damages for loss of business profits, business interruption, loss of business
112
+ information, loss of data, or any other pecuniary or non-pecuniary loss arising out of or in connection with
113
+ the use or inability to use the Model, Derivatives or any Output, even if the Licensor has been advised of
114
+ the possibility of such damages.
115
+ 6.2 Indemnification: The Licensee agrees to indemnify, defend, and hold harmless the Licensor, its
116
+ affiliates, officers, directors, employees, and agents from and against any claims, liabilities, damages,
117
+ losses, costs, or expenses (including reasonable attorneys' fees) arising out of or related to the
118
+ Licensee's use of the Model, any Derivatives, or any Output, including any violation of this Agreement or
119
+ applicable laws.
120
+
121
+ 7. Termination
122
+ 7.1 Termination by Licensor: The Licensor reserves the right to terminate this Agreement and revoke the
123
+ Licensee’s rights to use the Model at any time, with or without cause, and without prior notice if the
124
+ Licensee breaches any of the terms or conditions of this Agreement. Termination shall be effective
125
+ immediately upon notice.
126
+ 7.2 Effect of Termination: Upon termination of this Agreement, the Licensee must immediately cease all use
127
+ of the Model and Derivatives and destroy all copies of the Model and Derivatives in its possession or
128
+ control, including any backup or archival copies. The Licensee shall certify in writing to the Licensor that
129
+ such destruction has been completed.
130
+ 7.3 Survival: The provisions of this Agreement that by their nature should survive termination, including
131
+ but not limited to, Sections 4 (Ownership), 5 (No Warranty), 6 (Limitation of Liability), and this Section 7
132
+ (Termination), shall continue to apply after termination.
133
+
134
+ 8. Governing Law
135
+ 8.1 Governing Law: This Agreement shall be governed by and construed in accordance with the laws of the
136
+ Republic of Korea, without regard to its conflict of laws principles.
137
+ 8.2 Arbitration: Any disputes, controversies, or claims arising out of or relating to this Agreement,
138
+ including its existence, validity, interpretation, performance, breach, or termination, shall be referred
139
+ to and finally resolved by arbitration administered by the Korean Commercial Arbitration Board (KCAB) in
140
+ accordance with the International Arbitration Rules of the Korean Commercial Arbitration Board in force at
141
+ the time of the commencement of the arbitration. The seat of arbitration shall be Seoul, Republic of Korea.
142
+ The tribunal shall consist of one arbitrator. The language of the arbitration shall be English.
143
+
144
+ 9. Alterations
145
+ 9.1 Modifications: The Licensor reserves the right to modify or amend this Agreement at any time, in its
146
+ sole discretion. Any modifications will be effective upon posting the updated Agreement on the Licensor’s
147
+ website or through other means of communication. The Licensee is responsible for reviewing the Agreement
148
+ periodically for changes. Continued use of the Model after any modifications have been made constitutes
149
+ acceptance of the revised Agreement.
150
+ 9.2 Entire Agreement: This Agreement constitutes the entire agreement between the Licensee and Licensor
151
+ concerning the subject matter hereof and supersedes all prior or contemporaneous oral or written agreements,
152
+ representations, or understandings. Any terms or conditions of any purchase order or other document
153
+ submitted by the Licensee in connection with the Model that are in addition to, different from, or
154
+ inconsistent with the terms and conditions of this Agreement are not binding on the Licensor and are void.
155
+
156
+ By downloading, installing, or using the EXAONE AI Model, the Licensee acknowledges that it has read,
157
+ understood, and agrees to be bound by the terms and conditions of this Agreement.
README.md ADDED
@@ -0,0 +1,820 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ license_name: exaone
4
+ license_link: LICENSE
5
+ language:
6
+ - en
7
+ - ko
8
+ - es
9
+ - de
10
+ - ja
11
+ - vi
12
+ tags:
13
+ - lg-ai
14
+ - exaone
15
+ pipeline_tag: image-text-to-text
16
+ library_name: transformers
17
+ ---
18
+
19
+ <br>
20
+ <br>
21
+ <p align="center">
22
+ <img src="assets/EXAONE_Symbol+BI_3d.png" width="400">
23
+ <br>
24
+ <br>
25
+ <br>
26
+
27
+ <div align="center">
28
+ <a href="https://huggingface.co/collections/LGAI-EXAONE/exaone-45" style="text-decoration: none;">
29
+ <img src="https://img.shields.io/badge/🤗-HuggingFace-FC926C?style=for-the-badge" alt="HuggingFace">
30
+ </a>
31
+ <a href="https://www.lgresearch.ai/blog/view?seq=641" style="text-decoration: none;">
32
+ <img src="https://img.shields.io/badge/📝-Blog-E343BD?style=for-the-badge" alt="Blog">
33
+ </a>
34
+ <a href="https://github.com/LG-AI-EXAONE/EXAONE-4.5/blob/main/assets/Technical_Report__EXAONE_4_5.pdf" style="text-decoration: none;">
35
+ <img src="https://img.shields.io/badge/📑-Technical_Report_(TBU)-684CF4?style=for-the-badge" alt="Technical Report">
36
+ </a>
37
+ <a href="https://github.com/LG-AI-EXAONE/EXAONE-4.5" style="text-decoration: none;">
38
+ <img src="https://img.shields.io/badge/🖥️-GitHub-2B3137?style=for-the-badge" alt="GitHub">
39
+ </a>
40
+ <!-- <a href="#" style="text-decoration: none;">
41
+ <img src="https://img.shields.io/badge/✈️_API-Try_on_FriendliAI-2649BC?style=for-the-badge" alt="FriendliAI">
42
+ </a> -->
43
+ </div>
44
+
45
+
46
+
47
+
48
+ <br><br>
49
+
50
+ # EXAONE 4.5
51
+
52
+ We introduce EXAONE 4.5, the first open-weight vision language model developed by LG AI Research.
53
+ Integrating a dedicated visual encoder into the existing EXAONE 4.0 framework, we expand the model's capability toward multimodality.
54
+ EXAONE 4.5 features 33 billion parameters in total, including 1.2 billion parameters from the vision encoder.
55
+ EXAONE 4.5 achieves competitive performance in general benchmarks while outperforming SOTA models of similar size in document understanding and Korean contextual reasoning, inheriting powerful language capabilities from our previous language models.
56
+
57
+ For more details, please refer to the [technical report](https://github.com/LG-AI-EXAONE/EXAONE-4.5/blob/main/assets/Technical_Report__EXAONE_4_5.pdf), [blog](https://www.lgresearch.ai/blog/view?seq=641) and [GitHub](https://github.com/LG-AI-EXAONE/EXAONE-4.5).
58
+
59
+
60
+ ### Model Configuration
61
+
62
+ - Model Type: Causal Language Model + Vision Encoder
63
+ - Number of Parameters (Language Model): 31.7B
64
+ - Number of Parameters (Vision Encoder): 1.29B
65
+ - Hidden Dimension: 5,120
66
+ - Intermediate size: 27,392
67
+ - Number of Layers: 64 Main layers + 1 MTP layer
68
+ - Hybrid Attention Pattern: 16 x (3 Sliding window attention + 1 Global attention)
69
+ - Reordered Norm: Apply normalization after Attention/MLP, and before residual connection
70
+ - Sliding Window Attention
71
+ - Number of Attention Heads: 40 Q-heads and 8 KV-heads
72
+ - Head Dimension: 128 for both Q/KV
73
+ - Sliding Window Size: 128
74
+ - Global Attention
75
+ - Number of Attention Heads: 40 Q-heads and 8 KV-heads
76
+ - Head Dimension: 128 for both Q/KV
77
+ - No Rotary Positional Embedding Used (NoPE)
78
+ - Vision Encoder
79
+ - Grouped Query Attention (GQA)
80
+ - 2D RoPE for vision embeddings
81
+ - Vocab Size: 153,600
82
+ - Context Length: 262,144 tokens
83
+ - Knowledge Cutoff: Dec 2024 (2024/12)
84
+
85
+
86
+
87
+ ## Evaluation Results
88
+
89
+
90
+
91
+ ### Vision-Language Tasks
92
+
93
+ <table>
94
+ <tr>
95
+ <th style="background: rgba(128,128,128,0.1); text-align: center;"> </th>
96
+ <th style="background: rgba(128,128,128,0.1); text-align: center;">EXAONE 4.5 33B (Reasoning)</th>
97
+ <th style="background: rgba(128,128,128,0.1); text-align: center;">GPT-5 mini (Reasoning: high)</th>
98
+ <th style="background: rgba(128,128,128,0.1); text-align: center;">Qwen3-VL 32B Thinking</th>
99
+ <th style="background: rgba(128,128,128,0.1); text-align: center;">Qwen3-VL 235B Thinking</th>
100
+ <th style="background: rgba(128,128,128,0.1); text-align: center;">Qwen3.5 27B (Reasoning)</th>
101
+ </tr>
102
+ <tr>
103
+ <td align="center">Architecture</td>
104
+ <td align="center">Dense</td>
105
+ <td align="center">-</td>
106
+ <td align="center">Dense</td>
107
+ <td align="center">MoE</td>
108
+ <td align="center">Dense</td>
109
+ </tr>
110
+ <tr>
111
+ <td align="center">Total Params</td>
112
+ <td align="center">33B</td>
113
+ <td align="center">-</td>
114
+ <td align="center">33B</td>
115
+ <td align="center">236B</td>
116
+ <td align="center">27B</td>
117
+ </tr>
118
+ <tr>
119
+ <td align="center">Active Params</td>
120
+ <td align="center">33B</td>
121
+ <td align="center">-</td>
122
+ <td align="center">33B</td>
123
+ <td align="center">22B</td>
124
+ <td align="center">27B</td>
125
+ </tr>
126
+ <tr>
127
+ <td align="center" colspan='7' style="background: linear-gradient(90deg, rgba(252,146,108,0.3) 0%, rgba(227,67,189,0.3) 50%, rgba(104,76,244,0.3) 100%); font-weight: bold; height:32px; padding-top:2px; padding-bottom:2px;"><i>STEM / Puzzle</i></td>
128
+ </tr>
129
+ <tr>
130
+ <td align="center">MMMU</td>
131
+ <td align="center">78.7</td>
132
+ <td align="center">79.0</td>
133
+ <td align="center">78.1</td>
134
+ <td align="center">80.6</td>
135
+ <td align="center">82.3</td>
136
+ </tr>
137
+ <tr>
138
+ <td align="center">MMMU-Pro</td>
139
+ <td align="center">68.6</td>
140
+ <td align="center">67.3</td>
141
+ <td align="center">68.1</td>
142
+ <td align="center">69.3</td>
143
+ <td align="center">75.0</td>
144
+ </tr>
145
+ <tr>
146
+ <td align="center">MedXpertQA-MM</td>
147
+ <td align="center">42.1</td>
148
+ <td align="center">34.4</td>
149
+ <td align="center">41.6</td>
150
+ <td align="center">47.6</td>
151
+ <td align="center">62.4</td>
152
+ </tr>
153
+ <tr>
154
+ <td align="center">MathVision</td>
155
+ <td align="center">75.2</td>
156
+ <td align="center">71.9</td>
157
+ <td align="center">70.2</td>
158
+ <td align="center">74.6</td>
159
+ <td align="center">86.0</td>
160
+ </tr>
161
+ <tr>
162
+ <td align="center">MathVista (mini)</td>
163
+ <td align="center">85.0</td>
164
+ <td align="center">79.1</td>
165
+ <td align="center">85.9</td>
166
+ <td align="center">85.8</td>
167
+ <td align="center">87.8</td>
168
+ </tr>
169
+ <tr>
170
+ <td align="center">WeMath</td>
171
+ <td align="center">79.1</td>
172
+ <td align="center">70.3</td>
173
+ <td align="center">71.6</td>
174
+ <td align="center">74.8</td>
175
+ <td align="center">84.0</td>
176
+ </tr>
177
+ <tr>
178
+ <td align="center">LogicVista</td>
179
+ <td align="center">73.8</td>
180
+ <td align="center">70.3</td>
181
+ <td align="center">70.9</td>
182
+ <td align="center">72.2</td>
183
+ <td align="center">77.0</td>
184
+ </tr>
185
+ <tr>
186
+ <td align="center">BabyVision</td>
187
+ <td align="center">18.8</td>
188
+ <td align="center">20.9</td>
189
+ <td align="center">17.4</td>
190
+ <td align="center">22.2</td>
191
+ <td align="center">44.6</td>
192
+ </tr>
193
+ <tr>
194
+ <td align="center" colspan='7' style="background: linear-gradient(90deg, rgba(252,146,108,0.3) 0%, rgba(227,67,189,0.3) 50%, rgba(104,76,244,0.3) 100%); font-weight: bold; height:32px; padding-top:2px; padding-bottom:2px;"><i>Document Understanding</i></td>
195
+ </tr>
196
+ <tr>
197
+ <td align="center">AI2D</td>
198
+ <td align="center">89.0</td>
199
+ <td align="center">88.2</td>
200
+ <td align="center">88.9</td>
201
+ <td align="center">89.2</td>
202
+ <td align="center">92.9</td>
203
+ </tr>
204
+ <tr>
205
+ <td align="center">ChartQAPro</td>
206
+ <td align="center">62.2</td>
207
+ <td align="center">60.9</td>
208
+ <td align="center">61.4</td>
209
+ <td align="center">61.2</td>
210
+ <td align="center">66.8</td>
211
+ </tr>
212
+ <tr>
213
+ <td align="center">CharXiv (RQ)</td>
214
+ <td align="center">71.7</td>
215
+ <td align="center">68.6</td>
216
+ <td align="center">65.2</td>
217
+ <td align="center">66.1</td>
218
+ <td align="center">79.5</td>
219
+ </tr>
220
+ <tr>
221
+ <td align="center">OCRBench v2</td>
222
+ <td align="center">63.2</td>
223
+ <td align="center">55.8</td>
224
+ <td align="center">68.4</td>
225
+ <td align="center">66.8</td>
226
+ <td align="center">67.3</td>
227
+ </tr>
228
+ <tr>
229
+ <td align="center">OmniDocBench v1.5</td>
230
+ <td align="center">81.2</td>
231
+ <td align="center">77.0</td>
232
+ <td align="center">83.1</td>
233
+ <td align="center">84.5</td>
234
+ <td align="center">88.9</td>
235
+ </tr>
236
+ <tr>
237
+ <td align="center" colspan='7' style="background: linear-gradient(90deg, rgba(252,146,108,0.3) 0%, rgba(227,67,189,0.3) 50%, rgba(104,76,244,0.3) 100%); font-weight: bold; height:32px; padding-top:2px; padding-bottom:2px;"><i>General</i></td>
238
+ </tr>
239
+ <tr>
240
+ <td align="center">MMStar</td>
241
+ <td align="center">74.9</td>
242
+ <td align="center">74.1</td>
243
+ <td align="center">79.4</td>
244
+ <td align="center">78.7</td>
245
+ <td align="center">81.0</td>
246
+ </tr>
247
+ <tr>
248
+ <td align="center">BLINK</td>
249
+ <td align="center">68.8</td>
250
+ <td align="center">67.7</td>
251
+ <td align="center">68.5</td>
252
+ <td align="center">67.1</td>
253
+ <td align="center">71.6</td>
254
+ </tr>
255
+ <tr>
256
+ <td align="center">HallusionBench</td>
257
+ <td align="center">63.7</td>
258
+ <td align="center">63.2</td>
259
+ <td align="center">67.4</td>
260
+ <td align="center">66.7</td>
261
+ <td align="center">70.0</td>
262
+ </tr>
263
+ <tr>
264
+ <td align="center" colspan='7' style="background: linear-gradient(90deg, rgba(252,146,108,0.3) 0%, rgba(227,67,189,0.3) 50%, rgba(104,76,244,0.3) 100%); font-weight: bold; height:32px; padding-top:2px; padding-bottom:2px;"><i>Korean</i></td>
265
+ </tr>
266
+ <tr>
267
+ <td align="center">KMMMU</td>
268
+ <td align="center">42.7</td>
269
+ <td align="center">42.6</td>
270
+ <td align="center">37.8</td>
271
+ <td align="center">42.1</td>
272
+ <td align="center">51.7</td>
273
+ </tr>
274
+ <tr>
275
+ <td align="center">K-Viscuit</td>
276
+ <td align="center">80.1</td>
277
+ <td align="center">78.5</td>
278
+ <td align="center">78.5</td>
279
+ <td align="center">83.9</td>
280
+ <td align="center">84.0</td>
281
+ </tr>
282
+ <tr>
283
+ <td align="center">KRETA</td>
284
+ <td align="center">91.9</td>
285
+ <td align="center">94.8</td>
286
+ <td align="center">90.3</td>
287
+ <td align="center">92.8</td>
288
+ <td align="center">96.5</td>
289
+ </tr>
290
+ </table>
291
+
292
+
293
+ ### Language-only Tasks
294
+
295
+ <table>
296
+ <tr>
297
+ <th style="background: rgba(128,128,128,0.1); text-align: center;"> </th>
298
+ <th style="background: rgba(128,128,128,0.1); text-align: center;">EXAONE 4.5 33B (Reasoning)</th>
299
+ <th style="background: rgba(128,128,128,0.1); text-align: center;">GPT-5 mini (Reasoning: high)</th>
300
+ <th style="background: rgba(128,128,128,0.1); text-align: center;">K-EXAONE 236B (Reasoning)</th>
301
+ <th style="background: rgba(128,128,128,0.1); text-align: center;">Qwen3-VL 235B Thinking</th>
302
+ <th style="background: rgba(128,128,128,0.1); text-align: center;">Qwen3.5 27B (Reasoning)</th>
303
+ </tr>
304
+ <tr>
305
+ <td align="center">Architecture</td>
306
+ <td align="center">Dense</td>
307
+ <td align="center">-</td>
308
+ <td align="center">MoE</td>
309
+ <td align="center">MoE</td>
310
+ <td align="center">Dense</td>
311
+ </tr>
312
+ <tr>
313
+ <td align="center">Total Params</td>
314
+ <td align="center">33B</td>
315
+ <td align="center">-</td>
316
+ <td align="center">236B</td>
317
+ <td align="center">236B</td>
318
+ <td align="center">27B</td>
319
+ </tr>
320
+ <tr>
321
+ <td align="center">Active Params</td>
322
+ <td align="center">33B</td>
323
+ <td align="center">-</td>
324
+ <td align="center">23B</td>
325
+ <td align="center">22B</td>
326
+ <td align="center">27B</td>
327
+ </tr>
328
+ <tr>
329
+ <td align="center" colspan='7' style="background: linear-gradient(90deg, rgba(252,146,108,0.3) 0%, rgba(227,67,189,0.3) 50%, rgba(104,76,244,0.3) 100%); font-weight: bold; height:32px; padding-top:2px; padding-bottom:2px;"><i>Reasoning</i></td>
330
+ </tr>
331
+ <tr>
332
+ <td align="center">AIME 2025</td>
333
+ <td align="center">92.9</td>
334
+ <td align="center">91.1</td>
335
+ <td align="center">92.8</td>
336
+ <td align="center">89.7</td>
337
+ <td align="center">93.5</td>
338
+ </tr>
339
+ <tr>
340
+ <td align="center">AIME 2026</td>
341
+ <td align="center">92.6</td>
342
+ <td align="center">92.4</td>
343
+ <td align="center">92.2</td>
344
+ <td align="center">89.4</td>
345
+ <td align="center">90.8</td>
346
+ </tr>
347
+ <tr>
348
+ <td align="center">GPQA-Diamond</td>
349
+ <td align="center">80.5</td>
350
+ <td align="center">82.3</td>
351
+ <td align="center">79.1</td>
352
+ <td align="center">77.1</td>
353
+ <td align="center">85.5</td>
354
+ </tr>
355
+ <tr>
356
+ <td align="center">LiveCodeBench v6</td>
357
+ <td align="center">81.4</td>
358
+ <td align="center">78.1</td>
359
+ <td align="center">80.7</td>
360
+ <td align="center">70.1</td>
361
+ <td align="center">80.7</td>
362
+ </tr>
363
+ <tr>
364
+ <td align="center">MMLU-Pro</td>
365
+ <td align="center">83.3</td>
366
+ <td align="center">83.3</td>
367
+ <td align="center">83.8</td>
368
+ <td align="center">83.8</td>
369
+ <td align="center">86.1</td>
370
+ </tr>
371
+ <tr>
372
+ <td align="center" colspan='7' style="background: linear-gradient(90deg, rgba(252,146,108,0.3) 0%, rgba(227,67,189,0.3) 50%, rgba(104,76,244,0.3) 100%); font-weight: bold; height:32px; padding-top:2px; padding-bottom:2px;"><i>Agentic Tool Use</i></td>
373
+ </tr>
374
+ <tr>
375
+ <td align="center">τ<sup>2</sup>-Bench (Retail)</td>
376
+ <td align="center">77.9</td>
377
+ <td align="center">78.3</td>
378
+ <td align="center">78.6</td>
379
+ <td align="center">67.0</td>
380
+ <td align="center">84.7</td>
381
+ </tr>
382
+ <tr>
383
+ <td align="center">τ<sup>2</sup>-Bench (Airline)</td>
384
+ <td align="center">56.5</td>
385
+ <td align="center">60.0</td>
386
+ <td align="center">60.4</td>
387
+ <td align="center">62.0</td>
388
+ <td align="center">67.5</td>
389
+ </tr>
390
+ <tr>
391
+ <td align="center">τ<sup>2</sup>-Bench (Telecom)</td>
392
+ <td align="center">73.0</td>
393
+ <td align="center">74.1</td>
394
+ <td align="center">73.5</td>
395
+ <td align="center">44.7</td>
396
+ <td align="center">99.3</td>
397
+ </tr>
398
+ <tr>
399
+ <td align="center" colspan='7' style="background: linear-gradient(90deg, rgba(252,146,108,0.3) 0%, rgba(227,67,189,0.3) 50%, rgba(104,76,244,0.3) 100%); font-weight: bold; height:32px; padding-top:2px; padding-bottom:2px;"><i>Instruction Following</i></td>
400
+ </tr>
401
+ <tr>
402
+ <td align="center">IFBench</td>
403
+ <td align="center">62.6</td>
404
+ <td align="center">74.0</td>
405
+ <td align="center">67.3</td>
406
+ <td align="center">59.2</td>
407
+ <td align="center">76.5</td>
408
+ </tr>
409
+ <tr>
410
+ <td align="center">IFEval</td>
411
+ <td align="center">89.6</td>
412
+ <td align="center">92.8</td>
413
+ <td align="center">89.7</td>
414
+ <td align="center">88.2</td>
415
+ <td align="center">95.0</td>
416
+ </tr>
417
+ <tr>
418
+ <td align="center" colspan='7' style="background: linear-gradient(90deg, rgba(252,146,108,0.3) 0%, rgba(227,67,189,0.3) 50%, rgba(104,76,244,0.3) 100%); font-weight: bold; height:32px; padding-top:2px; padding-bottom:2px;"><i>Long Context Understanding</i></td>
419
+ </tr>
420
+ <tr>
421
+ <td align="center">AA-LCR</td>
422
+ <td align="center">50.6</td>
423
+ <td align="center">68.0</td>
424
+ <td align="center">53.5</td>
425
+ <td align="center">58.7</td>
426
+ <td align="center">67.3</td>
427
+ </tr>
428
+ <tr>
429
+ <td align="center" colspan='7' style="background: linear-gradient(90deg, rgba(252,146,108,0.3) 0%, rgba(227,67,189,0.3) 50%, rgba(104,76,244,0.3) 100%); font-weight: bold; height:32px; padding-top:2px; padding-bottom:2px;"><i>Korean</i></td>
430
+ </tr>
431
+ <tr>
432
+ <td align="center">KMMLU-Pro</td>
433
+ <td align="center">67.6</td>
434
+ <td align="center">72.5</td>
435
+ <td align="center">67.3</td>
436
+ <td align="center">71.1</td>
437
+ <td align="center">73.0</td>
438
+ </tr>
439
+ <tr>
440
+ <td align="center">KoBALT</td>
441
+ <td align="center">52.1</td>
442
+ <td align="center">63.6</td>
443
+ <td align="center">61.8</td>
444
+ <td align="center">51.1</td>
445
+ <td align="center">54.9</td>
446
+ </tr>
447
+ </table>
448
+
449
+
450
+
451
+
452
+ ## Quickstart
453
+
454
+
455
+ ### Serving EXAONE 4.5
456
+
457
+ For better inference speed and memory usage, we recommend serving the model with an optimized inference engine. The EXAONE 4.5 model is supported by various frameworks, including TensorRT-LLM, vLLM, SGLang, and llama.cpp. Support will be expanded in the future.
458
+
459
+ Practically, you can serve the EXAONE 4.5 model with 256K context length on a **single H200 GPU**, or on **4x A100-40GB GPUs** by using tensor parallelism.
460
+
461
+
462
+ ### vLLM
463
+
464
+ Our forked versions of both Transformers and vLLM are required to use the EXAONE 4.5 model.
465
+ You can install the requirements by running the following commands:
466
+
467
+ ```bash
468
+ uv pip install git+https://github.com/lkm2835/vllm.git@add-exaone4_5
469
+ uv pip install git+https://github.com/nuxlear/transformers.git@add-exaone4_5
470
+ ```
471
+
472
+ After you install vLLM, you can launch the server with the following code snippet. You can remove unnecessary arguments from the snippet.
473
+
474
+ ```bash
475
+ vllm serve LGAI-EXAONE/EXAONE-4.5-33B \
476
+ --served-model-name EXAONE-4.5-33B \
477
+ --port 8000 \
478
+ --tensor-parallel-size 2 \
479
+ --max-model-len 262144 \
480
+ --reasoning-parser qwen3 \
481
+ --enable-auto-tool-choice \
482
+ --tool-call-parser hermes \
483
+ --limit-mm-per-prompt '{"image": 64}' \
484
+ --speculative_config '{
485
+ "method": "mtp",
486
+ "num_speculative_tokens": 3
487
+ }'
488
+
489
+ ```
490
+
491
+ An OpenAI-compatible API server will be available at http://localhost:8000/v1.
492
+
493
+
494
+ ### SGLang
495
+
496
+ Our forked versions of both Transformers and SGLang are required to use the EXAONE 4.5 model.
497
+ You can install the requirements by running the following commands:
498
+
499
+ ```bash
500
+ uv pip install git+https://github.com/lkm2835/sglang.git@add-exaone4_5
501
+ uv pip install git+https://github.com/nuxlear/transformers.git@add-exaone4_5
502
+ ```
503
+
504
+ After you install SGLang, you can launch the server with the following code snippet. You can remove unnecessary arguments from the snippet.
505
+
506
+ ```bash
507
+ python -m sglang.launch_server \
508
+ --model-path LGAI-EXAONE/EXAONE-4.5-33B \
509
+ --served-model-name EXAONE-4.5-33B \
510
+ --port 8000 \
511
+ --tp-size 2 \
512
+ --mem-frac 0.81 \
513
+ --reasoning-parser qwen3 \
514
+ --tool-call-parser hermes \
515
+ --speculative-algorithm EAGLE \
516
+ --speculative-num-steps 3 \
517
+ --speculative-eagle-topk 1 \
518
+ --speculative-num-draft-tokens 4
519
+
520
+ ```
521
+
522
+ An OpenAI-compatible API server will be available at http://localhost:8000/v1.
523
+
524
+
525
+
526
+ ### Using EXAONE 4.5
527
+
528
+ After launching the OpenAI-compatible server with EXAONE 4.5, you can seamlessly use the model via API with a single code integration, even if the serving framework changes. To use the OpenAI Python SDK and the following examples, you should install the `openai` library in your environment.
529
+
530
+
531
+ > [!IMPORTANT]
532
+ > To achieve the expected performance, we recommend using the following configurations:
533
+ > - We recommend using `temperature=1.0`, `top_p=0.95`, `presence_penalty=1.5` for general purposes.
534
+ > - We recommend using `temperature=0.6`, `top_p=0.95`, `presence_penalty=1.5`, `top_k=20` for OCR/document-related tasks and Korean inputs.
535
+ > - We recommend using `temperature=1.0`, `top_p=0.95` for text-only inputs.
536
+ > - Different from EXAONE-4.0, EXAONE 4.5 uses `enable_thinking=True` as default. Thus, you need to set `enable_thinking=False` when you want to use non-reasoning mode.
537
+ > - EXAONE 4.5 prefers using `\boxed{}` format to answer the question. We recommend using this format with the corresponding format instruction for better parsing accuracy.
538
+ >
539
+
540
+
541
+
542
+ You can easily try the model's chat completions by using the OpenAI Python SDK. For a server running on your local machine, you will need to change the `base_url` and `api_key` for the OpenAI client.
543
+
544
+
545
+ ### Image-Text QA
546
+
547
+ #### Reasoning mode
548
+
549
+ For tasks that require accurate results, you can run the EXAONE 4.5 model in reasoning mode as follows.
550
+
551
+ ```python
552
+ from openai import OpenAI
553
+
554
+ client = OpenAI(
555
+ base_url="http://localhost:8000/v1",
556
+ api_key="EMPTY",
557
+ )
558
+
559
+ messages = [
560
+ {
561
+ "role": "user",
562
+ "content": [
563
+ {
564
+ "type": "image_url",
565
+ "image_url": {
566
+ "url": "https://github.com/Aim-Highest/EXAONE-4.5/blob/main/assets/exaone45_input2.png?raw=true",
567
+ },
568
+ },
569
+ {
570
+ "type": "text",
571
+ "text": "How much larger is the model released in winter 2025 compared with the one released in summer 2024?",
572
+ },
573
+ ]
574
+ }
575
+ ]
576
+
577
+ response = client.chat.completions.create(
578
+ model="EXAONE-4.5-33B",
579
+ messages=messages,
580
+ max_tokens=32768,
581
+ temperature=1.0,
582
+ top_p=0.95,
583
+ presence_penalty=1.5,
584
+ extra_body={
585
+ "chat_template_kwargs": {
586
+ "enable_thinking": True, # default: True
587
+ }
588
+ },
589
+ )
590
+ print(response)
591
+ ```
592
+
593
+ #### Non-reasoning mode
594
+
595
+ For tasks where latency matters more than accuracy, you can run the EXAONE 4.5 model in non-reasoning mode as follows.
596
+
597
+ ```python
598
+ from openai import OpenAI
599
+
600
+ client = OpenAI(
601
+ base_url="http://localhost:8000/v1",
602
+ api_key="EMPTY",
603
+ )
604
+
605
+ messages = [
606
+ {
607
+ "role": "user",
608
+ "content": [
609
+ {
610
+ "type": "image_url",
611
+ "image_url": {
612
+ "url": "https://github.com/Aim-Highest/EXAONE-4.5/blob/main/assets/exaone45_input1.jpg?raw=true",
613
+ },
614
+ },
615
+ {
616
+ "type": "text",
617
+ "text": "What dish is the person preparing, and how is it made?",
618
+ },
619
+ ]
620
+ }
621
+ ]
622
+
623
+ response = client.chat.completions.create(
624
+ model="EXAONE-4.5-33B",
625
+ messages=messages,
626
+ max_tokens=32768,
627
+ temperature=1.0,
628
+ top_p=0.95,
629
+ presence_penalty=1.5,
630
+ extra_body={
631
+ "chat_template_kwargs": {
632
+ "enable_thinking": False, # default: True
633
+ }
634
+ },
635
+ )
636
+ print(response)
637
+
638
+ ```
639
+
640
+
641
+ ### Text-only QA
642
+
643
+ ```python
644
+ from openai import OpenAI
645
+
646
+ client = OpenAI(
647
+ base_url="http://localhost:8000/v1",
648
+ api_key="EMPTY",
649
+ )
650
+
651
+ messages = [
652
+ {
653
+ "role": "user",
654
+ "content": "Explain how useful you are.",
655
+ }
656
+ ]
657
+
658
+ response = client.chat.completions.create(
659
+ model="EXAONE-4.5-33B",
660
+ messages=messages,
661
+ max_tokens=32768,
662
+ temperature=1.0,
663
+ top_p=0.95,
664
+ extra_body={
665
+ "chat_template_kwargs": {
666
+ "enable_thinking": True, # default: True
667
+ }
668
+ },
669
+ )
670
+ print(response)
671
+
672
+ ```
673
+
674
+
675
+ ### Agentic Use
676
+
677
+ The following example demonstrates the agentic capability of EXAONE 4.5 for image-text inputs. You can use your own agents, skills, or other harnesses with the EXAONE 4.5 model.
678
+
679
+ ```python
680
+ # If needed:
681
+ # pip install langchain langchain-openai langchain-mcp-adapters
682
+ # curl -LsSf https://astral.sh/uv/install.sh | sh
683
+ # sudo apt-get update && sudo apt-get install -y nodejs npm
684
+
685
+ import os
686
+ import asyncio
687
+ from langchain_openai import ChatOpenAI
688
+ from langchain.agents import create_agent
689
+ from langchain_mcp_adapters.client import MultiServerMCPClient
690
+
691
+ def print_message(msg):
692
+ parts = msg.content if isinstance(msg.content, list) else [{"type": "text", "text": msg.content or ""}]
693
+ text_out, reasoning_out = [], []
694
+
695
+ for p in parts:
696
+ if isinstance(p, dict):
697
+ if p.get("type") in ("text", "output_text") and p.get("text"):
698
+ text_out.append(p["text"])
699
+ elif p.get("type") in ("reasoning", "reasoning_text") and p.get("text"):
700
+ reasoning_out.append(p["text"])
701
+
702
+ if reasoning_out:
703
+ print("\n[assistant_reasoning_content]")
704
+ print("\n".join(reasoning_out))
705
+ if text_out:
706
+ print("\n[assistant_content]")
707
+ print("\n".join(text_out))
708
+
709
+ async def main():
710
+ model = ChatOpenAI(
711
+ model="EXAONE-4.5-33B",
712
+ base_url="http://localhost:8000/v1",
713
+ api_key="EMPTY",
714
+ temperature=1.0,
715
+ model_kwargs={"top_p": 0.95},
716
+ )
717
+
718
+ client = MultiServerMCPClient({
719
+ "filesystem": {
720
+ "transport": "stdio",
721
+ "command": "npx",
722
+ "args": ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"],
723
+ },
724
+ "fetch": {
725
+ "transport": "stdio",
726
+ "command": "uvx",
727
+ "args": ["mcp-server-fetch"],
728
+ },
729
+ "duckduckgo": {
730
+ "transport": "stdio",
731
+ "command": "uvx",
732
+ "args": ["duckduckgo-mcp-server"],
733
+ },
734
+ })
735
+
736
+ agent = create_agent(model, await client.get_tools())
737
+
738
+ inputs = {
739
+ "messages": [{
740
+ "role": "user",
741
+ "content": [
742
+ {
743
+ "type": "text",
744
+ "text": (
745
+ "Look at the image and identify the landmark. "
746
+ "Use the DuckDuckGo MCP tool to verify its name, height, and location. "
747
+ "Then use the fetch tool to read a fuller article page about it. "
748
+ "Create /tmp/mcp-demo and write a short markdown file to "
749
+ "/tmp/mcp-demo/landmark.md with: name, location, height, and a one-sentence summary of the article. "
750
+ "Finally, return only the exact file content."
751
+ ),
752
+ },
753
+ {
754
+ "type": "image_url",
755
+ "image_url": {
756
+ "url": "https://upload.wikimedia.org/wikipedia/commons/a/a8/Tour_Eiffel_Wikimedia_Commons.jpg"
757
+ },
758
+ },
759
+ ],
760
+ }]
761
+ }
762
+
763
+ async for step in agent.astream(inputs, stream_mode="values"):
764
+ msg = step["messages"][-1]
765
+ if getattr(msg, "type", "") == "ai":
766
+ print_message(msg)
767
+ for tc in getattr(msg, "tool_calls", []) or []:
768
+ print(f"\n[tool call] {tc['name']}({tc['args']})")
769
+
770
+ if __name__ == "__main__":
771
+ asyncio.run(main())
772
+
773
+ ```
774
+
775
+
776
+
777
+
778
+
779
+ ## Limitation
780
+
781
+ EXAONE 4.5 models, like all existing multimodal models, have certain limitations and may occasionally generate
782
+ inappropriate responses. The multimodal model generates responses based on the output probabilities of tokens, which
783
+ are determined during training on the training data. While we make every effort to exclude personal, harmful, and biased
784
+ information from the training data, some problematic content may still be included, potentially leading to undesirable
785
+ responses. Please note that the text generated by EXAONE 4.5 models does not reflect the views of LG AI Research.
786
+
787
+ - Inappropriate answers may be generated, which contain personal, harmful or other inappropriate information.
788
+ - Biased responses may be generated, which are associated with age, gender, race, and so on.
789
+ - The generated responses rely heavily on statistics from the training data, which can result in the generation of
790
+ semantically or syntactically incorrect sentences.
791
+ - Since the models do not reflect the latest information, the responses may be false or contradictory.
792
+
793
+ LG AI Research strives to reduce potential risks that may arise from EXAONE 4.5 models. Users are not allowed to
794
+ engage in any malicious activities (e.g., entering illegal information) that may induce the creation of inappropriate
795
+ outputs violating LG AI’s ethical principles when using EXAONE 4.5 models.
796
+
797
+
798
+
799
+ ## License
800
+
801
+ The model is licensed under the [EXAONE AI Model License Agreement 1.2 - NC](./LICENSE).
802
+
803
+
804
+
805
+ ## Citation
806
+
807
+ ```
808
+ @article{exaone-4.5,
809
+ title={EXAONE 4.5 Technical Report},
810
+ author={{LG AI Research}},
811
+ journal={arXiv preprint arXiv:XXXX.XXXXX},
812
+ year={2026}
813
+ }
814
+ ```
815
+
816
+
817
+ ## Contact
818
+
819
+ LG AI Research Technical Support: contact_us@lgresearch.ai
820
+
assets/EXAONE_Symbol+BI_3d.png ADDED

Git LFS Details

  • SHA256: c473c63768e9303c02a4f968fd2e7d41df3f669fedc6a7b51c4398cfcd7f23e4
  • Pointer size: 131 Bytes
  • Size of remote file: 249 kB
assets/exaone45_input1.jpg ADDED

Git LFS Details

  • SHA256: 733d20981e4ebfd5881d6efbe57262a7c796c8eb9a95ea1e1fc7b8ff1554c629
  • Pointer size: 132 Bytes
  • Size of remote file: 1.72 MB
assets/exaone45_input2.png ADDED

Git LFS Details

  • SHA256: aebd28d0c75797a25734795eaa9e415203ab9a8edbd3f0b0ed09ee9e55c6157b
  • Pointer size: 131 Bytes
  • Size of remote file: 512 kB
chat_template.jinja ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% set image_count = namespace(value=0) %}
2
+ {% set video_count = namespace(value=0) %}
3
+
4
+ {%- set role_indicators = {
5
+ 'user': '<|user|>\n',
6
+ 'assistant': '<|assistant|>\n',
7
+ 'system': '<|system|>\n',
8
+ 'tool': '<|tool|>\n',
9
+ 'tool_declare': '<|tool_declare|>\n'
10
+ } %}
11
+ {%- set end_of_turn = '<|endofturn|>\n' %}
12
+
13
+
14
+ {%- macro declare_available_tools(tools) %}
15
+ {{- "# Tools" }}
16
+ {{- "\n" }}
17
+ {%- for tool in tools %}
18
+ {{- "<tool>" }}
19
+ {{- tool | tojson(ensure_ascii=False) | safe }}
20
+ {{- "</tool>\n" }}
21
+ {%- endfor %}
22
+ {%- endmacro %}
23
+
24
+
25
+ {%- set ns = namespace(last_query_index = messages|length - 1, last_query_index_not_yet_determined = true) %}
26
+ {%- for message in messages[::-1] %}
27
+ {%- set index = (messages|length - 1) - loop.index0 %}
28
+ {%- if ns.last_query_index_not_yet_determined and message.role == "user" and message.content is string %}
29
+ {%- set ns.last_query_index = index -%}
30
+ {%- set ns.last_query_index_not_yet_determined = false -%}
31
+ {%- endif %}
32
+ {%- endfor %}
33
+
34
+ {%- if tools is defined and tools %}
35
+ {{- role_indicators['tool_declare'] }}
36
+ {{- declare_available_tools(tools) }}
37
+ {{- end_of_turn -}}
38
+ {%- endif %}
39
+
40
+ {%- for i in range(messages | length) %}
41
+ {%- set msg = messages[i] %}
42
+ {%- set role = msg.role %}
43
+ {%- if role not in role_indicators %}
44
+ {{- raise_exception('Unknown role: ' ~ role) }}
45
+ {%- endif %}
46
+
47
+ {%- if i == 0 %}
48
+ {%- if role == 'system' %}
49
+ {{- role_indicators['system'] }}
50
+ {{- msg.content }}
51
+ {{- end_of_turn -}}
52
+ {%- continue %}
53
+ {%- endif %}
54
+ {%- endif %}
55
+
56
+ {%- if role == 'assistant' %}
57
+ {{- role_indicators['assistant'] }}
58
+
59
+ {%- set content = (msg.content if (msg.content is defined and msg.content) else "") -%}
60
+ {%- set reasoning = none -%}
61
+
62
+ {%- if msg.reasoning_content is defined and msg.reasoning_content%}
63
+ {%- set reasoning = msg.reasoning_content.strip() -%}
64
+ {%- elif content and "</think>" in content %}
65
+ {%- set _parts = content.split('</think>') -%}
66
+ {%- set reasoning = _parts[0].lstrip('<think>').strip() -%}
67
+ {%- set content = _parts[-1].strip() -%}
68
+ {%- endif %}
69
+
70
+ {%- if not (reasoning and i > ns.last_query_index) or (skip_think is defined and skip_think) %}
71
+ {%- set reasoning = none %}
72
+ {%- endif %}
73
+
74
+ {%- set content = content.strip() -%}
75
+
76
+ {{- "<think>\n" }}
77
+ {{- (reasoning if reasoning is not none else "") }}
78
+ {{- "\n</think>\n\n" }}
79
+
80
+ {{- content }}
81
+
82
+ {%- if msg.tool_calls %}
83
+ {%- if content is defined and content %}
84
+ {{- "\n" }}
85
+ {%- endif %}
86
+ {%- for tool_call in msg.tool_calls %}
87
+ {%- if tool_call.function is defined %}
88
+ {%- set tool_call = tool_call.function %}
89
+ {%- endif %}
90
+
91
+ {%- if tool_call.arguments is defined %}
92
+ {%- set arguments = tool_call.arguments %}
93
+ {%- elif tool_call.parameters is defined %}
94
+ {%- set arguments = tool_call.parameters %}
95
+ {%- else %}
96
+ {{- raise_exception('arguments or parameters are mandatory: ' ~ tool_call) }}
97
+ {%- endif %}
98
+ {%- if arguments is string %}
99
+ {{- "<tool_call>" }}{"name": "{{- tool_call.name }}", "arguments": {{ arguments }}}{{- "</tool_call>" }}
100
+ {%- else %}
101
+ {{- "<tool_call>" }}{"name": "{{- tool_call.name }}", "arguments": {{ arguments | tojson(ensure_ascii=False) | safe }}}{{- "</tool_call>" }}
102
+ {%- endif %}
103
+ {%- if not loop.last %}
104
+ {{- "\n" }}
105
+ {%- endif %}
106
+
107
+ {%- endfor %}
108
+ {%- endif %}
109
+ {{- end_of_turn -}}
110
+
111
+ {%- elif role == "tool" %}
112
+ {%- if i == 0 or messages[i - 1].role != "tool" %}
113
+ {{- role_indicators['tool'] }}
114
+ {%- endif %}
115
+ {%- if msg.content is defined %}
116
+ {%- if msg.content is string %}
117
+ {{- "<tool_result>" }}{{ msg.content }}{{- "</tool_result>" }}
118
+ {%- else %}
119
+ {{- "<tool_result>" }}{{ msg.content | tojson(ensure_ascii=False) | safe }}{{- "</tool_result>" }}
120
+ {%- endif %}
121
+ {%- endif %}
122
+ {%- if loop.last or messages[i + 1].role != "tool" %}
123
+ {{- end_of_turn -}}
124
+ {%- else %}
125
+ {{- "\n" }}
126
+ {%- endif %}
127
+
128
+ {%- else %}
129
+ {{- role_indicators[role] }}
130
+ {%- if msg.content is string %}
131
+ {{- msg.content }}
132
+ {%- else %}
133
+ {%- for content in msg.content %}
134
+ {%- if content.type == 'image' %}
135
+ {%- set image_count.value = image_count.value + 1 %}
136
+ {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif %}
137
+ {{- "<vision><|image_pad|></vision>\n" }}
138
+ {%- elif content.type == 'video' %}
139
+ {%- set video_count.value = video_count.value + 1 %}
140
+ {%- if add_vision_id %}Video {{ video_count.value }}: {% endif %}
141
+ {{- "<vision><|video_pad|></vision>\n" }}
142
+ {%- elif content.type == 'text' %}
143
+ {{- content.text }}
144
+ {%- else %}
145
+ {{- content.text }}
146
+ {%- endif %}
147
+ {%- endfor %}
148
+ {%- endif %}
149
+ {{- end_of_turn -}}
150
+ {%- endif %}
151
+ {% endfor %}
152
+
153
+
154
+ {%- if add_generation_prompt %}
155
+ {{- role_indicators['assistant'] }}
156
+ {%- if enable_thinking is not defined or enable_thinking is true %}
157
+ {{- "<think>\n" }}
158
+ {%- else %}
159
+ {{- "<think>\n\n</think>\n\n" }}
160
+ {%- endif %}
161
+ {%- endif %}
config.json ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Exaone4_5_ForConditionalGeneration"
4
+ ],
5
+ "image_token_id": 67,
6
+ "model_type": "exaone4_5",
7
+ "text_config": {
8
+ "_num_mtp_layers": 1,
9
+ "architectures": [
10
+ "Exaone4ForCausalLM"
11
+ ],
12
+ "attention_dropout": 0.0,
13
+ "bos_token_id": 1,
14
+ "dtype": "bfloat16",
15
+ "eos_token_id": 53,
16
+ "hidden_act": "silu",
17
+ "hidden_size": 5120,
18
+ "initializer_range": 0.02,
19
+ "intermediate_size": 27392,
20
+ "layer_types": [
21
+ "sliding_attention",
22
+ "sliding_attention",
23
+ "sliding_attention",
24
+ "full_attention",
25
+ "sliding_attention",
26
+ "sliding_attention",
27
+ "sliding_attention",
28
+ "full_attention",
29
+ "sliding_attention",
30
+ "sliding_attention",
31
+ "sliding_attention",
32
+ "full_attention",
33
+ "sliding_attention",
34
+ "sliding_attention",
35
+ "sliding_attention",
36
+ "full_attention",
37
+ "sliding_attention",
38
+ "sliding_attention",
39
+ "sliding_attention",
40
+ "full_attention",
41
+ "sliding_attention",
42
+ "sliding_attention",
43
+ "sliding_attention",
44
+ "full_attention",
45
+ "sliding_attention",
46
+ "sliding_attention",
47
+ "sliding_attention",
48
+ "full_attention",
49
+ "sliding_attention",
50
+ "sliding_attention",
51
+ "sliding_attention",
52
+ "full_attention",
53
+ "sliding_attention",
54
+ "sliding_attention",
55
+ "sliding_attention",
56
+ "full_attention",
57
+ "sliding_attention",
58
+ "sliding_attention",
59
+ "sliding_attention",
60
+ "full_attention",
61
+ "sliding_attention",
62
+ "sliding_attention",
63
+ "sliding_attention",
64
+ "full_attention",
65
+ "sliding_attention",
66
+ "sliding_attention",
67
+ "sliding_attention",
68
+ "full_attention",
69
+ "sliding_attention",
70
+ "sliding_attention",
71
+ "sliding_attention",
72
+ "full_attention",
73
+ "sliding_attention",
74
+ "sliding_attention",
75
+ "sliding_attention",
76
+ "full_attention",
77
+ "sliding_attention",
78
+ "sliding_attention",
79
+ "sliding_attention",
80
+ "full_attention",
81
+ "sliding_attention",
82
+ "sliding_attention",
83
+ "sliding_attention",
84
+ "full_attention"
85
+ ],
86
+ "max_position_embeddings": 262144,
87
+ "num_nextn_predict_layers": 1,
88
+ "mtp_loss_scaling_factor": 0.05,
89
+ "mtp_share_layers": true,
90
+ "model_type": "exaone4_5_text",
91
+ "num_attention_heads": 40,
92
+ "num_hidden_layers": 64,
93
+ "num_key_value_heads": 8,
94
+ "pad_token_id": 0,
95
+ "rms_norm_eps": 1e-05,
96
+ "rope_scaling": {
97
+ "factor": 16.0,
98
+ "high_freq_factor": 4.0,
99
+ "low_freq_factor": 1.0,
100
+ "original_max_position_embeddings": 8192,
101
+ "rope_type": "llama3"
102
+ },
103
+ "rope_theta": 1000000.0,
104
+ "sliding_window": 4096,
105
+ "sliding_window_pattern": "LLLG",
106
+ "tie_word_embeddings": false,
107
+ "use_cache": true,
108
+ "vocab_size": 153600
109
+ },
110
+ "transformers_version": "5.3.0.dev0",
111
+ "video_token_id": 68,
112
+ "vision_config": {
113
+ "depth": 28,
114
+ "dtype": "bfloat16",
115
+ "fullatt_block_indexes": [
116
+ 6,
117
+ 13,
118
+ 20,
119
+ 27
120
+ ],
121
+ "hidden_act": "silu",
122
+ "hidden_size": 2048,
123
+ "in_channels": 3,
124
+ "initializer_range": 0.02,
125
+ "intermediate_size": 5120,
126
+ "model_type": "exaone4_5_vision",
127
+ "num_heads": 32,
128
+ "num_key_value_heads": 8,
129
+ "out_hidden_size": 5120,
130
+ "patch_size": 14,
131
+ "spatial_merge_size": 2,
132
+ "temporal_patch_size": 2,
133
+ "tokens_per_second": 2,
134
+ "window_size": 112
135
+ },
136
+ "vision_end_token_id": 74,
137
+ "vision_start_token_id": 73,
138
+ "vision_token_id": 67,
139
+ "vocab_size": 153600
140
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "do_sample": true,
5
+ "eos_token_id": 53,
6
+ "output_attentions": false,
7
+ "output_hidden_states": false,
8
+ "pad_token_id": 0,
9
+ "presence_penalty": 1.5,
10
+ "temperature": 1.0,
11
+ "top_p": 0.95,
12
+ "transformers_version": "5.3.0.dev0",
13
+ "use_cache": true
14
+ }
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:719196fefe7b934b0eb3f68d5f2550b9831dabf58f8829b8d9a88ea76a84ab66
3
+ size 49736389848
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b6322a2171b404e365b9f1bf1e8573f9da238c9cd6958c02e8709e914bd1a80
3
+ size 18963941248
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": null,
3
+ "data_format": "channels_first",
4
+ "default_to_square": true,
5
+ "device": null,
6
+ "do_center_crop": null,
7
+ "do_convert_rgb": true,
8
+ "do_normalize": true,
9
+ "do_rescale": true,
10
+ "do_resize": true,
11
+ "image_mean": [
12
+ 0.48145466,
13
+ 0.4578275,
14
+ 0.40821073
15
+ ],
16
+ "image_processor_type": "Exaone4_5_ImageProcessor",
17
+ "image_std": [
18
+ 0.26862954,
19
+ 0.26130258,
20
+ 0.27577711
21
+ ],
22
+ "input_data_format": null,
23
+ "max_pixels": 3211264,
24
+ "merge_size": 2,
25
+ "min_pixels": 3136,
26
+ "patch_size": 14,
27
+ "resample": 3,
28
+ "rescale_factor": 0.00392156862745098,
29
+ "return_tensors": null,
30
+ "size": {
31
+ "longest_edge": 3211264,
32
+ "shortest_edge": 3136
33
+ },
34
+ "temporal_patch_size": 2
35
+ }
processor_config.json ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "image_processor": {
3
+ "crop_size": null,
4
+ "data_format": "channels_first",
5
+ "default_to_square": true,
6
+ "device": null,
7
+ "do_center_crop": null,
8
+ "do_convert_rgb": true,
9
+ "do_normalize": true,
10
+ "do_rescale": true,
11
+ "do_resize": true,
12
+ "image_mean": [
13
+ 0.48145466,
14
+ 0.4578275,
15
+ 0.40821073
16
+ ],
17
+ "image_processor_type": "Exaone4_5_ImageProcessor",
18
+ "image_std": [
19
+ 0.26862954,
20
+ 0.26130258,
21
+ 0.27577711
22
+ ],
23
+ "input_data_format": null,
24
+ "max_pixels": 3211264,
25
+ "merge_size": 2,
26
+ "min_pixels": 3136,
27
+ "patch_size": 14,
28
+ "resample": 3,
29
+ "rescale_factor": 0.00392156862745098,
30
+ "return_tensors": null,
31
+ "size": {
32
+ "longest_edge": 3211264,
33
+ "shortest_edge": 3136
34
+ },
35
+ "temporal_patch_size": 2
36
+ },
37
+ "processor_class": "Exaone4_5_Processor",
38
+ "video_processor": {
39
+ "data_format": "channels_first",
40
+ "default_to_square": true,
41
+ "do_convert_rgb": true,
42
+ "do_normalize": true,
43
+ "do_rescale": true,
44
+ "do_resize": true,
45
+ "do_sample_frames": false,
46
+ "image_mean": [
47
+ 0.48145466,
48
+ 0.4578275,
49
+ 0.40821073
50
+ ],
51
+ "image_processor_type": "Exaone4_5_ImageProcessor",
52
+ "image_std": [
53
+ 0.26862954,
54
+ 0.26130258,
55
+ 0.27577711
56
+ ],
57
+ "max_frames": 768,
58
+ "max_pixels": 3211264,
59
+ "merge_size": 2,
60
+ "min_frames": 4,
61
+ "min_pixels": 3136,
62
+ "patch_size": 14,
63
+ "resample": 3,
64
+ "rescale_factor": 0.00392156862745098,
65
+ "return_metadata": false,
66
+ "size": {
67
+ "longest_edge": 3211264,
68
+ "shortest_edge": 3136
69
+ },
70
+ "temporal_patch_size": 2,
71
+ "video_processor_type": "Exaone4_5_VideoProcessor"
72
+ }
73
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0bd798efa30739e209d51f36cfc2f0a636711e37ad9d69b77e4e5c8ca5f09fab
3
+ size 12160205
tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "backend": "tokenizers",
3
+ "bos_token": "[BOS]",
4
+ "clean_up_tokenization_spaces": false,
5
+ "eos_token": "<|endofturn|>",
6
+ "errors": "replace",
7
+ "is_local": true,
8
+ "model_input_names": [
9
+ "input_ids",
10
+ "attention_mask"
11
+ ],
12
+ "model_max_length": 1000000000000000019884624838656,
13
+ "model_specific_special_tokens": {},
14
+ "pad_token": "[PAD]",
15
+ "padding_side": "right",
16
+ "processor_class": "Exaone4_5_Processor",
17
+ "split_special_tokens": false,
18
+ "tokenizer_class": "TokenizersBackend",
19
+ "unk_token": "[UNK]"
20
+ }