Commit ·
5f27182
1
Parent(s): eb8b68b
update readme (#1)
Browse files - update readme (0c299a8031886bd1e7910eaf95570f2328eb4f48)
Co-authored-by: RussellFeng <russellfeng@users.noreply.huggingface.co>
- README.md +6 -6
- README_CN.md +5 -5
README.md
CHANGED
|
@@ -162,7 +162,7 @@ from openai import OpenAI
|
|
| 162 |
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
|
| 163 |
|
| 164 |
response = client.chat.completions.create(
|
| 165 |
-
model="hy3-preview",
|
| 166 |
messages=[
|
| 167 |
{"role": "user", "content": "Hello! Can you briefly introduce yourself?"},
|
| 168 |
],
|
|
@@ -198,14 +198,14 @@ uv pip install --editable . --torch-backend=auto
|
|
| 198 |
Start the vLLM server with MTP enabled:
|
| 199 |
|
| 200 |
```bash
|
| 201 |
-
vllm serve tencent/Hy3-preview \
|
| 202 |
--tensor-parallel-size 8 \
|
| 203 |
--speculative-config.method mtp \
|
| 204 |
--speculative-config.num_speculative_tokens 1 \
|
| 205 |
--tool-call-parser hy_v3 \
|
| 206 |
--reasoning-parser hy_v3 \
|
| 207 |
--enable-auto-tool-choice \
|
| 208 |
-
--served-model-name hy3-preview
|
| 209 |
```
|
| 210 |
|
| 211 |
### SGLang
|
|
@@ -223,7 +223,7 @@ Launch SGLang server with MTP enabled:
|
|
| 223 |
|
| 224 |
```bash
|
| 225 |
python3 -m sglang.launch_server \
|
| 226 |
-
--model tencent/Hy3-preview \
|
| 227 |
--tp 8 \
|
| 228 |
--tool-call-parser hunyuan \
|
| 229 |
--reasoning-parser hunyuan \
|
|
@@ -231,7 +231,7 @@ python3 -m sglang.launch_server \
|
|
| 231 |
--speculative-eagle-topk 1 \
|
| 232 |
--speculative-num-draft-tokens 2 \
|
| 233 |
--speculative-algorithm EAGLE \
|
| 234 |
-
--served-model-name hy3-preview
|
| 235 |
```
|
| 236 |
|
| 237 |
## Training
|
|
@@ -259,4 +259,4 @@ If you would like to leave a message for our R&D and product teams, welcome to c
|
|
| 259 |
|
| 260 |
<p align="center">
|
| 261 |
<i>Hy3 preview is developed by the Tencent Hy Team.</i>
|
| 262 |
-
</p>
|
|
|
|
| 162 |
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
|
| 163 |
|
| 164 |
response = client.chat.completions.create(
|
| 165 |
+
model="hy3-preview-base",
|
| 166 |
messages=[
|
| 167 |
{"role": "user", "content": "Hello! Can you briefly introduce yourself?"},
|
| 168 |
],
|
|
|
|
| 198 |
Start the vLLM server with MTP enabled:
|
| 199 |
|
| 200 |
```bash
|
| 201 |
+
vllm serve tencent/Hy3-preview-base \
|
| 202 |
--tensor-parallel-size 8 \
|
| 203 |
--speculative-config.method mtp \
|
| 204 |
--speculative-config.num_speculative_tokens 1 \
|
| 205 |
--tool-call-parser hy_v3 \
|
| 206 |
--reasoning-parser hy_v3 \
|
| 207 |
--enable-auto-tool-choice \
|
| 208 |
+
--served-model-name hy3-preview-base
|
| 209 |
```
|
| 210 |
|
| 211 |
### SGLang
|
|
|
|
| 223 |
|
| 224 |
```bash
|
| 225 |
python3 -m sglang.launch_server \
|
| 226 |
+
--model tencent/Hy3-preview-base \
|
| 227 |
--tp 8 \
|
| 228 |
--tool-call-parser hunyuan \
|
| 229 |
--reasoning-parser hunyuan \
|
|
|
|
| 231 |
--speculative-eagle-topk 1 \
|
| 232 |
--speculative-num-draft-tokens 2 \
|
| 233 |
--speculative-algorithm EAGLE \
|
| 234 |
+
--served-model-name hy3-preview-base
|
| 235 |
```
|
| 236 |
|
| 237 |
## Training
|
|
|
|
| 259 |
|
| 260 |
<p align="center">
|
| 261 |
<i>Hy3 preview is developed by the Tencent Hy Team.</i>
|
| 262 |
+
</p>
|
README_CN.md
CHANGED
|
@@ -157,7 +157,7 @@ from openai import OpenAI
|
|
| 157 |
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
|
| 158 |
|
| 159 |
response = client.chat.completions.create(
|
| 160 |
-
model="hy3-preview",
|
| 161 |
messages=[
|
| 162 |
{"role": "user", "content": "你好!请简单介绍一下你自己。"},
|
| 163 |
],
|
|
@@ -194,14 +194,14 @@ uv pip install --editable . --torch-backend=auto
|
|
| 194 |
启动 vLLM 服务,开启 MTP:
|
| 195 |
|
| 196 |
```bash
|
| 197 |
-
vllm serve tencent/Hy3-preview \
|
| 198 |
--tensor-parallel-size 8 \
|
| 199 |
--speculative-config.method mtp \
|
| 200 |
--speculative-config.num_speculative_tokens 1 \
|
| 201 |
--tool-call-parser hy_v3 \
|
| 202 |
--reasoning-parser hy_v3 \
|
| 203 |
--enable-auto-tool-choice \
|
| 204 |
-
--served-model-name hy3-preview
|
| 205 |
```
|
| 206 |
|
| 207 |
### SGLang
|
|
@@ -220,7 +220,7 @@ pip3 install -e "python"
|
|
| 220 |
|
| 221 |
```bash
|
| 222 |
python3 -m sglang.launch_server \
|
| 223 |
-
--model tencent/Hy3-preview \
|
| 224 |
--tp 8 \
|
| 225 |
--tool-call-parser hunyuan \
|
| 226 |
--reasoning-parser hunyuan \
|
|
@@ -228,7 +228,7 @@ python3 -m sglang.launch_server \
|
|
| 228 |
--speculative-eagle-topk 1 \
|
| 229 |
--speculative-num-draft-tokens 2 \
|
| 230 |
--speculative-algorithm EAGLE \
|
| 231 |
-
--served-model-name hy3-preview
|
| 232 |
```
|
| 233 |
|
| 234 |
## 模型训练
|
|
|
|
| 157 |
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
|
| 158 |
|
| 159 |
response = client.chat.completions.create(
|
| 160 |
+
model="hy3-preview-base",
|
| 161 |
messages=[
|
| 162 |
{"role": "user", "content": "你好!请简单介绍一下你自己。"},
|
| 163 |
],
|
|
|
|
| 194 |
启动 vLLM 服务,开启 MTP:
|
| 195 |
|
| 196 |
```bash
|
| 197 |
+
vllm serve tencent/Hy3-preview-base \
|
| 198 |
--tensor-parallel-size 8 \
|
| 199 |
--speculative-config.method mtp \
|
| 200 |
--speculative-config.num_speculative_tokens 1 \
|
| 201 |
--tool-call-parser hy_v3 \
|
| 202 |
--reasoning-parser hy_v3 \
|
| 203 |
--enable-auto-tool-choice \
|
| 204 |
+
--served-model-name hy3-preview-base
|
| 205 |
```
|
| 206 |
|
| 207 |
### SGLang
|
|
|
|
| 220 |
|
| 221 |
```bash
|
| 222 |
python3 -m sglang.launch_server \
|
| 223 |
+
--model tencent/Hy3-preview-base \
|
| 224 |
--tp 8 \
|
| 225 |
--tool-call-parser hunyuan \
|
| 226 |
--reasoning-parser hunyuan \
|
|
|
|
| 228 |
--speculative-eagle-topk 1 \
|
| 229 |
--speculative-num-draft-tokens 2 \
|
| 230 |
--speculative-algorithm EAGLE \
|
| 231 |
+
--served-model-name hy3-preview-base
|
| 232 |
```
|
| 233 |
|
| 234 |
## 模型训练
|