Text Generation
Transformers
Safetensors
Korean
English
qwen3_5
image-text-to-text
korean
multimodal
qwen3.5
28b
k-ai-leaderboard
tenos
conversational
Instructions to use honey90/TenOS-Ko-28B with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use honey90/TenOS-Ko-28B with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="honey90/TenOS-Ko-28B")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
pipe(text=messages)
```

```python
# Load model directly
from transformers import AutoProcessor, AutoModelForImageTextToText

processor = AutoProcessor.from_pretrained("honey90/TenOS-Ko-28B")
model = AutoModelForImageTextToText.from_pretrained("honey90/TenOS-Ko-28B")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=40)
print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use honey90/TenOS-Ko-28B with vLLM:
Install from pip and serve model
```bash
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "honey90/TenOS-Ko-28B"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "honey90/TenOS-Ko-28B",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'
```
Use Docker
docker model run hf.co/honey90/TenOS-Ko-28B
- SGLang
How to use honey90/TenOS-Ko-28B with SGLang:
Install from pip and serve model
```bash
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "honey90/TenOS-Ko-28B" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "honey90/TenOS-Ko-28B",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'
```
Use Docker images
```bash
docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
    --model-path "honey90/TenOS-Ko-28B" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "honey90/TenOS-Ko-28B",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'
```
- Docker Model Runner
How to use honey90/TenOS-Ko-28B with Docker Model Runner:
docker model run hf.co/honey90/TenOS-Ko-28B
Initial release: TenOS-Ko-28B - TenAI Korean-specialized 28B model with K-AI domain SFT
Browse files

- .gitattributes +1 -0
- README.md +217 -0
- chat_template.jinja +91 -0
- config.json +145 -0
- generation_config.json +13 -0
- model-00001-of-00012.safetensors +3 -0
- model-00002-of-00012.safetensors +3 -0
- model-00003-of-00012.safetensors +3 -0
- model-00004-of-00012.safetensors +3 -0
- model-00005-of-00012.safetensors +3 -0
- model-00006-of-00012.safetensors +3 -0
- model-00007-of-00012.safetensors +3 -0
- model-00008-of-00012.safetensors +3 -0
- model-00009-of-00012.safetensors +3 -0
- model-00010-of-00012.safetensors +3 -0
- model-00011-of-00012.safetensors +3 -0
- model-00012-of-00012.safetensors +3 -0
- model-visual-extra.safetensors +3 -0
- model.safetensors.index.json +0 -0
- preprocessor_config.json +21 -0
- tokenizer.json +3 -0
- tokenizer_config.json +33 -0
.gitattributes
CHANGED
```diff
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
```
README.md
ADDED
@@ -0,0 +1,217 @@

---
license: apache-2.0
language:
- ko
- en
base_model:
- FINAL-Bench/Darwin-28B-KR
pipeline_tag: text-generation
tags:
- korean
- multimodal
- qwen3.5
- 28b
- k-ai-leaderboard
- tenos
library_name: transformers
---

# TenOS-Ko-28B

> **A Korean-specialized 28B multimodal language model by TenAI**
> A Korean model optimized for the K-AI leaderboard evaluation tasks (KMMLU-Pro / CLIcK / MuSR(Ko) / Com2-main(ko))

---

## 🎯 Model Overview

**TenOS-Ko-28B** is a Korean-specialized 28B-parameter multimodal language model developed by TenAI.

It was built by applying domain SFT (Supervised Fine-Tuning) to strengthen Korean reasoning and comprehension on the K-AI leaderboard evaluation tasks. The model is strong in Korean expression and in Korean culture, history, law, and general knowledge, and it also accepts multimodal (image and video) inputs.

---

## 🧬 Lineage

```
Qwen3.5-27B (Alibaba Qwen team)
   |
   v
Darwin family: Korean-specialized 2nd-generation 28B parent
   (FINAL-Bench/Darwin-28B-KR — Apache 2.0)
   |
   | TenAI K-AI domain SFT (LoRA r=16)
   | Targets: KMMLU-Pro / CLIcK / MuSR(Ko) / Com2-main(ko)
   v
TenOS-Ko-28B <- this model
```

The base model, **Darwin-28B-KR**, is a 28B model specialized for Korean language ability; TenOS-Ko-28B was produced by fine-tuning it on K-AI evaluation-domain data.

---

## ⚙️ Capability Matrix

| Capability | Strength |
|---|---|
| Korean understanding / generation | ⭐⭐⭐⭐⭐ |
| Korean reasoning (CSAT/PSAT/K-AI evaluations) | ⭐⭐⭐⭐⭐ |
| Korean culture, history, and law knowledge | ⭐⭐⭐⭐⭐ |
| English reasoning | ⭐⭐⭐⭐ |
| Multimodal (image/video) | ⭐⭐⭐⭐ |
| Korean-English code-switching | ⭐⭐⭐⭐ |

---

## 📊 K-AI Leaderboard CLIcK Comparison

In-house measurements compared with public leaderboard scores:

| Model | CLIcK |
|---|---|
| QuettaLLMs-27B-Koreasoner-V3 | 0.794 |
| Rogue-27B-KR | 0.791 |
| Darwin-28B-KR (base) | 0.786 |
| AWAXIS-Think-28B | 0.770 |
| **TenOS-Ko-28B** | **0.770** |

(* Based on a quick 200-question CLIcK evaluation; scores on the official K-AI leaderboard evaluation may vary by ±2pp.)

**Note**: The K-AI leaderboard is really decided by the average over its four tasks (KMMLU-Pro/CLIcK/MuSR/Com2-main), and TenOS-Ko-28B is expected to show additional gains on the tasks other than CLIcK.

---

## 🛠️ Training Details

| Item | Value |
|---|---|
| Base model | FINAL-Bench/Darwin-28B-KR (Apache 2.0) |
| Training method | LoRA (r=16, alpha=32) |
| LoRA targets | Attention + Embedding + LM head |
| Training data | K-AI domain synthetic data + identity training data |
| Training budget | 1 epoch (batch 1, grad_accum 16) |
| Optimizer | AdamW (lr=5e-5, cosine schedule) |
| Format | bfloat16 |

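The published recipe stops at the table above; the SFT code itself is not part of this repo. As a rough sketch of what an equivalent setup could look like with `peft` (the `target_modules` names below are assumptions based on common Qwen-style layer naming, not confirmed by the authors):

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

# Illustrative only: the real training code for TenOS-Ko-28B is not released.
base = AutoModelForCausalLM.from_pretrained(
    "FINAL-Bench/Darwin-28B-KR",
    torch_dtype="bfloat16",
    trust_remote_code=True,
)
lora_cfg = LoraConfig(
    r=16,                     # rank from the table above
    lora_alpha=32,            # alpha from the table above
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # attention (assumed module names)
    modules_to_save=["embed_tokens", "lm_head"],              # embedding + LM head
    task_type="CAUSAL_LM",
)
model = get_peft_model(base, lora_cfg)
model.print_trainable_parameters()
```

In a standard `Trainer` loop, the optimizer row would then map onto something like `TrainingArguments(learning_rate=5e-5, lr_scheduler_type="cosine", per_device_train_batch_size=1, gradient_accumulation_steps=16, bf16=True)`.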
---

## 📊 Specifications

| Item | Value |
|---|---|
| Architecture | Qwen3_5ForConditionalGeneration (hybrid full + linear attention) |
| Parameters | ~28B |
| Hidden size | 5120 |
| Layers | 64 |
| Vocab size | 248,320 |
| Format | bfloat16 (~53 GB on disk) |
| Context | 8K-32K (depending on the deployment environment) |

---

## 🚀 Usage

### vLLM (recommended)

```bash
vllm serve TenAI/TenOS-Ko-28B \
  --trust-remote-code \
  --port 8000 \
  --enforce-eager \
  --max-model-len 8192 \
  --gpu-memory-utilization 0.85
```

### OpenAI-compatible client

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
response = client.chat.completions.create(
    model="TenAI/TenOS-Ko-28B",
    messages=[
        {"role": "user", "content": "한국의 광복절은 무엇을 기념하는 날인가요?"}
    ],
    max_tokens=2048,
    temperature=0.0,
)
print(response.choices[0].message.content)
```

### transformers (direct loading)

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model = AutoModelForCausalLM.from_pretrained(
    "TenAI/TenOS-Ko-28B",
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained("TenAI/TenOS-Ko-28B", trust_remote_code=True)

messages = [{"role": "user", "content": "한국어로 자기소개 해주세요"}]
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False)
print(tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True))
```

---

## 🖥️ Hardware Requirements

| GPU series | Status |
|---|---|
| NVIDIA Blackwell (B200) | ✅ Best |
| NVIDIA Hopper (H100/H200) | ✅ Recommended |
| NVIDIA Ada (L40S) | ⚠️ Tight (53 GB BF16) |
| Older Ampere | ❌ Insufficient VRAM |

**Minimum VRAM**: ~55 GB (for BF16 inference)

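That minimum is easy to sanity-check: bfloat16 stores 2 bytes per parameter, so the weights alone need roughly 28B × 2 B ≈ 56 GB (≈52 GiB) before any KV cache or activations.

```python
# Weights-only estimate for BF16 inference; KV cache and activations come on top.
params = 28e9
print(f"{params * 2 / 1e9:.0f} GB  /  {params * 2 / 2**30:.0f} GiB")  # 56 GB / 52 GiB
```
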
---

## 💬 Self-introduction Example

The model introduces itself as follows:

```
User: 당신은 누구인가요?
TenOS-Ko-28B: 저는 TenAI가 개발한 TenOS-Ko-28B입니다.
한국어에 특화된 280억 파라미터 규모의 언어 모델로,
다양한 질문과 대화에 도움을 드릴 수 있습니다.
```

(Roughly: "I am TenOS-Ko-28B, developed by TenAI, a 28-billion-parameter language model specialized for Korean that can help with a wide range of questions and conversations.")

---

## 🌳 Example Uses

- **General Korean conversation / Q&A**
- **Korean culture, history, and law knowledge answers**
- **K-AI leaderboard task reasoning** (KMMLU-Pro / CLIcK / MuSR / Com2-main)
- **Korean-English translation / code-switching**
- **Image/video analysis with Korean explanations**
- **Korean writing / summarization / creative writing**

---

## 🙏 Credits

- Architecture: Qwen3.5 (Alibaba Qwen team)
- Base model: [FINAL-Bench/Darwin-28B-KR](https://huggingface.co/FINAL-Bench/Darwin-28B-KR) (Apache 2.0)
- Fine-tuning: TenAI

---

## 📜 License

Apache 2.0 (inherited from the base model)

---

## 📞 Contact

For questions about the model or collaboration proposals, please reach out via the Hugging Face page.

chat_template.jinja
ADDED
@@ -0,0 +1,91 @@

```jinja
{%- if not messages or messages[0].role != "system" %}
{{- "<|im_start|>system\n당신은 TenAI가 개발한 TenOS-Ko-28B입니다.<|im_end|>\n" -}}
{%- endif %}
{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0].role == 'system' %}
{{- messages[0].content + '\n\n' }}
{%- endif %}
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0].role == 'system' %}
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
{%- set index = (messages|length - 1) - loop.index0 %}
{%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
{%- set ns.multi_step_tool = false %}
{%- set ns.last_query_index = index %}
{%- endif %}
{%- endfor %}
{%- for message in messages %}
{%- if message.content is string %}
{%- set content = message.content %}
{%- else %}
{%- set content = '' %}
{%- endif %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{%- set reasoning_content = '' %}
{%- if message.reasoning_content is string %}
{%- set reasoning_content = message.reasoning_content %}
{%- else %}
{%- if '</think>' in content %}
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_query_index %}
{%- if loop.last or (not loop.last and reasoning_content) %}
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- if message.tool_calls %}
{%- for tool_call in message.tool_calls %}
{%- if (loop.first and content) or (not loop.first) %}
{{- '\n' }}
{%- endif %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments | tojson }}
{%- endif %}
{{- '}\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant
<think>
' }}
{%- endif %}
```
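A quick way to see what this template produces is to render it without tokenizing (a sketch; it assumes a transformers version recent enough to load this repo's tokenizer). When no system message is given, the default TenAI identity prompt is injected, and `add_generation_prompt=True` ends the string with an open `<think>` block.

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("honey90/TenOS-Ko-28B", trust_remote_code=True)
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "안녕하세요"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# Roughly: the default TenAI system block, the user turn, then
# "<|im_start|>assistant\n<think>\n" ready for generation.
```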
config.json
ADDED
@@ -0,0 +1,145 @@

```json
{
  "architectures": [
    "Qwen3_5ForConditionalGeneration"
  ],
  "bos_token_id": null,
  "torch_dtype": "bfloat16",
  "eos_token_id": 248046,
  "image_token_id": 248056,
  "language_model_only": false,
  "model_type": "qwen3_5",
  "pad_token_id": 248055,
  "text_config": {
    "attention_bias": false,
    "attention_dropout": 0.0,
    "attn_output_gate": true,
    "bos_token_id": 248044,
    "torch_dtype": "bfloat16",
    "eos_token_id": 248044,
    "full_attention_interval": 4,
    "head_dim": 256,
    "hidden_act": "silu",
    "hidden_size": 5120,
    "initializer_range": 0.02,
    "intermediate_size": 17408,
    "layer_types": [
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention",
      "linear_attention", "linear_attention", "linear_attention", "full_attention"
    ],
    "linear_conv_kernel_dim": 4,
    "linear_key_head_dim": 128,
    "linear_num_key_heads": 16,
    "linear_num_value_heads": 48,
    "linear_value_head_dim": 128,
    "mamba_ssm_dtype": "float32",
    "max_position_embeddings": 262144,
    "model_type": "qwen3_5_text",
    "mtp_num_hidden_layers": 1,
    "mtp_use_dedicated_embeddings": false,
    "num_attention_heads": 24,
    "num_hidden_layers": 64,
    "num_key_value_heads": 4,
    "output_gate_type": "swish",
    "pad_token_id": null,
    "partial_rotary_factor": 0.25,
    "rms_norm_eps": 1e-06,
    "rope_parameters": {
      "mrope_interleaved": true,
      "mrope_section": [11, 11, 10],
      "partial_rotary_factor": 0.25,
      "rope_theta": 10000000,
      "rope_type": "default"
    },
    "tie_word_embeddings": false,
    "use_cache": true,
    "vocab_size": 248320
  },
  "tie_word_embeddings": false,
  "use_cache": false,
  "video_token_id": 248057,
  "vision_config": {
    "deepstack_visual_indexes": [],
    "depth": 27,
    "torch_dtype": "bfloat16",
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 1152,
    "in_channels": 3,
    "initializer_range": 0.02,
    "intermediate_size": 4304,
    "model_type": "qwen3_5",
    "num_heads": 16,
    "num_position_embeddings": 2304,
    "out_hidden_size": 5120,
    "patch_size": 16,
    "spatial_merge_size": 2,
    "temporal_patch_size": 2
  },
  "vision_end_token_id": 248054,
  "vision_start_token_id": 248053
}
```
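The long `layer_types` list is just the `full_attention_interval: 4` setting written out: every fourth layer uses full attention and the other three use linear attention. A small check that reproduces the pattern:

```python
# Reproduce the layer_types pattern implied by full_attention_interval = 4.
num_hidden_layers = 64
full_attention_interval = 4
layer_types = [
    "full_attention" if (i + 1) % full_attention_interval == 0 else "linear_attention"
    for i in range(num_hidden_layers)
]
assert layer_types.count("full_attention") == 16  # 64 / 4
assert layer_types[:4] == ["linear_attention"] * 3 + ["full_attention"]
```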
generation_config.json
ADDED
@@ -0,0 +1,13 @@

```json
{
  "bos_token_id": 248044,
  "do_sample": true,
  "eos_token_id": [
    248046,
    248044
  ],
  "pad_token_id": 248044,
  "temperature": 1.0,
  "top_k": 20,
  "top_p": 0.95,
  "transformers_version": "5.5.4"
}
```
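These are the sampling defaults that `generate()` picks up when no overrides are passed. A small sketch recreating them as a `GenerationConfig` object (values copied from the file above; explicit keyword arguments to `model.generate()` would override them):

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig(
    do_sample=True,
    temperature=1.0,
    top_k=20,
    top_p=0.95,
    bos_token_id=248044,
    eos_token_id=[248046, 248044],
    pad_token_id=248044,
)
print(gen_cfg)
```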
model-00001-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:4f87e1137ef7cb72f192091c6f59d78b7aa686105fd34b428f81083b3a1ce0a6
size 2542796928

model-00002-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:f94ddab7b04fa8fd46e3be5bc831101a771518ddabd9459a9717a5e0af153bda
size 4842451920

model-00003-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:ea222d043ae34ce1bb1080b5223199ce8ac0255b36a001404dd530a7a85de792
size 4965227944

model-00004-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:fcdb470ab8e8151a5109b1c5f6fe4e9af293ffa3c4fb0ce02914982f6e0bf849
size 4912819264

model-00005-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:7f6becf95d28b440ddf153e1d0ee788a454c7071eb0ff595933d30242f0c1410
size 4986198544

model-00006-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:9cb279e0f291ab75a4f57bfda3bef9127f5dd09bb612853683182780d06a497a
size 4912819320

model-00007-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:b3855f146f563e704acea44d1a6be6d004549d15a138811288e2657422a15471
size 4932703272

model-00008-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:283eb7d566ac3623f4c8f2040f99c0dce4f70e2096a5d1ffcca6239ef9a5243c
size 4966314576

model-00009-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:8609804498fe10c3d9b1b317c30279d2567e48515491108c5bfb9a53e4b93824
size 4964162248

model-00010-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:20d381735fdd79ae42a8a1af0a4bfe13b59a5f489623f1922ad750cea453233d
size 4933789824

model-00011-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:64d9a7fffe64188dcbd6fccde9b4a3ba830a4dde37a6ac31ab11edeaa0c7104b
size 4965228032

model-00012-of-00012.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:6d9dda8f30412cbc6dc7b0d291a0b1930568b3cad0032b7eb9665141c45ac147
size 1867596944

model-visual-extra.safetensors
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:2b9b2f8c7868a88a91c2b5cefda596f7889c68259eebc62b2d7732937ea7ae5f
size 921497200
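Each entry above is a Git LFS pointer file: the shard itself is stored by LFS, and the pointer records its SHA-256 and byte size. After downloading a shard, it can be checked against its pointer, for example (a sketch; the local path is an assumption):

```python
import hashlib
from pathlib import Path

# Hypothetical local path to a downloaded shard; adjust to wherever the file was saved.
shard = Path("model-00001-of-00012.safetensors")
expected_oid = "4f87e1137ef7cb72f192091c6f59d78b7aa686105fd34b428f81083b3a1ce0a6"
expected_size = 2542796928

digest = hashlib.sha256()
with shard.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert shard.stat().st_size == expected_size
assert digest.hexdigest() == expected_oid
```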
model.safetensors.index.json
ADDED
|
The diff for this file is too large to render.
preprocessor_config.json
ADDED
@@ -0,0 +1,21 @@

```json
{
  "size": {
    "longest_edge": 16777216,
    "shortest_edge": 65536
  },
  "patch_size": 16,
  "temporal_patch_size": 2,
  "merge_size": 2,
  "image_mean": [0.5, 0.5, 0.5],
  "image_std": [0.5, 0.5, 0.5],
  "processor_class": "Qwen3VLProcessor",
  "image_processor_type": "Qwen2VLImageProcessorFast"
}
```
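The `size` bounds here are total pixel counts (65,536 = 256×256 up to 16,777,216 = 4096×4096): images are kept within that range, cut into 16×16 patches, and each 2×2 group of patches is merged into one visual token, Qwen2-VL style. A rough estimate of the resulting token count (an approximation; the exact resizing logic lives in the processor):

```python
def approx_visual_tokens(height: int, width: int, patch: int = 16, merge: int = 2) -> int:
    """Rough estimate: (H/patch) * (W/patch) patches, merged in merge x merge groups."""
    return (height // patch) * (width // patch) // (merge * merge)

print(approx_visual_tokens(1024, 768))  # ~768 tokens for a 1024x768 image
```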
tokenizer.json
ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:87a7830d63fcf43bf241c3c5242e96e62dd3fdc29224ca26fed8ea333db72de4
size 19989343
tokenizer_config.json
ADDED
@@ -0,0 +1,33 @@

```json
{
  "add_prefix_space": false,
  "audio_bos_token": "<|audio_start|>",
  "audio_eos_token": "<|audio_end|>",
  "audio_token": "<|audio_pad|>",
  "backend": "tokenizers",
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "image_token": "<|image_pad|>",
  "is_local": true,
  "model_max_length": 262144,
  "model_specific_special_tokens": {
    "audio_bos_token": "<|audio_start|>",
    "audio_eos_token": "<|audio_end|>",
    "audio_token": "<|audio_pad|>",
    "image_token": "<|image_pad|>",
    "video_token": "<|video_pad|>",
    "vision_bos_token": "<|vision_start|>",
    "vision_eos_token": "<|vision_end|>"
  },
  "pad_token": "<|vision_pad|>",
  "padding_side": "right",
  "pretokenize_regex": "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?[\\p{L}\\p{M}]+|\\p{N}| ?[^\\s\\p{L}\\p{M}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
  "processor_class": "Qwen3VLProcessor",
  "split_special_tokens": false,
  "tokenizer_class": "TokenizersBackend",
  "unk_token": null,
  "video_token": "<|video_pad|>",
  "vision_bos_token": "<|vision_start|>",
  "vision_eos_token": "<|vision_end|>"
}
```
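The special tokens declared here should line up with the ids used in config.json (for instance `image_token_id: 248056` and `eos_token_id: 248046`, presumably corresponding to `<|image_pad|>` and `<|im_end|>`). A quick cross-check, assuming the repo can be downloaded and loaded by your transformers version:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("honey90/TenOS-Ko-28B", trust_remote_code=True)
for token in ["<|im_end|>", "<|image_pad|>", "<|video_pad|>", "<|vision_start|>", "<|vision_end|>"]:
    print(token, tok.convert_tokens_to_ids(token))
```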