LayerEight committed on
Commit 3fe1d1c · verified · 1 Parent(s): 388483c

Upload Wikipedia-Networking-Qwen2.5-7B from Wikipedia-Networking-7B-output

.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1788/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ base_model: Qwen/Qwen2.5-7B-Instruct
+ library_name: peft
+ model_name: Wikipedia-Networking-7B-output
+ tags:
+ - base_model:adapter:Qwen/Qwen2.5-7B-Instruct
+ - lora
+ - sft
+ - transformers
+ - trl
+ licence: license
+ pipeline_tag: text-generation
+ ---
+
+ # Model Card for Wikipedia-Networking-7B-output
+
+ This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="LayerEight/Wikipedia-Networking-Qwen2.5-7B", device="cuda")  # repo id assumed from the commit title; the auto-generated card had model="None"
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+ This model was trained with SFT.
+
+ ### Framework versions
+
+ - PEFT 0.19.1
+ - TRL: 1.2.0
+ - Transformers: 5.5.1
+ - Pytorch: 2.11.0+rocm7.2
+ - Datasets: 4.8.4
+ - Tokenizers: 0.22.2
+
+ ## Citations
+
+ Cite TRL as:
+
+ ```bibtex
+ @software{vonwerra2020trl,
+ 	title = {{TRL: Transformers Reinforcement Learning}},
+ 	author = {von Werra, Leandro and Belkada, Younes and Tunstall, Lewis and Beeching, Edward and Thrush, Tristan and Lambert, Nathan and Huang, Shengyi and Rasul, Kashif and Gallouédec, Quentin},
+ 	license = {Apache-2.0},
+ 	url = {https://github.com/huggingface/trl},
+ 	year = {2020}
+ }
+ ```
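The card above records only that the adapter was trained with SFT via TRL. As a rough sketch of what such a run looks like (the dataset and hyperparameters here are illustrative assumptions, not values recorded in this commit; the LoRA settings mirror the adapter_config.json that follows):

```python
# Hedged sketch of an SFT + LoRA run consistent with the uploaded
# adapter_config.json. Dataset choice and training arguments are
# assumptions for illustration only; this commit does not record them.
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer

train_dataset = load_dataset("wikipedia", "20220301.en", split="train[:1%]")  # hypothetical corpus

peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    task_type="CAUSAL_LM",
)

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-7B-Instruct",
    train_dataset=train_dataset,
    peft_config=peft_config,
    args=SFTConfig(
        output_dir="Wikipedia-Networking-7B-output",
        num_train_epochs=1,   # matches trainer_state.json below
        learning_rate=2e-4,   # assumed from the logged LR schedule peak
    ),
)
trainer.train()
```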
adapter_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "lora_ga_config": null,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.19.1",
+   "qalora_group_size": 16,
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "down_proj",
+     "k_proj",
+     "up_proj",
+     "gate_proj",
+     "q_proj",
+     "o_proj"
+   ],
+   "target_parameters": null,
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_bdlora": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
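Because this repo ships a LoRA adapter rather than full weights, it can also be loaded explicitly with PEFT instead of through `pipeline`. A minimal sketch (the adapter repo id is assumed from the commit title):

```python
# Hedged sketch: attach the uploaded LoRA adapter to the base model.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-7B-Instruct", torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base, "LayerEight/Wikipedia-Networking-Qwen2.5-7B")  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
```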
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71ce8f09c288cc8c8b7ac6193695f7f42b8be0ba400b084b9163a8060c8df7bb
+ size 80792880
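For binary files the diff stores only a Git LFS pointer: the spec version, a SHA-256 of the content, and the byte size. A hedged sketch of verifying a downloaded file against the pointer above (the local path is hypothetical):

```python
# Sketch: check a downloaded file against the oid/size in its LFS pointer.
import hashlib
from pathlib import Path

path = Path("adapter_model.safetensors")  # hypothetical local copy
assert path.stat().st_size == 80792880
digest = hashlib.sha256(path.read_bytes()).hexdigest()
assert digest == "71ce8f09c288cc8c8b7ac6193695f7f42b8be0ba400b084b9163a8060c8df7bb"
```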
chat_template.jinja ADDED
@@ -0,0 +1,54 @@
+ {%- if tools %}
+     {{- '<|im_start|>system\n' }}
+     {%- if messages[0]['role'] == 'system' %}
+         {{- messages[0]['content'] }}
+     {%- else %}
+         {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
+     {%- endif %}
+     {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+     {%- for tool in tools %}
+         {{- "\n" }}
+         {{- tool | tojson }}
+     {%- endfor %}
+     {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+     {%- if messages[0]['role'] == 'system' %}
+         {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+     {%- else %}
+         {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
+     {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+     {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+         {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+     {%- elif message.role == "assistant" %}
+         {{- '<|im_start|>' + message.role }}
+         {%- if message.content %}
+             {{- '\n' + message.content }}
+         {%- endif %}
+         {%- for tool_call in message.tool_calls %}
+             {%- if tool_call.function is defined %}
+                 {%- set tool_call = tool_call.function %}
+             {%- endif %}
+             {{- '\n<tool_call>\n{"name": "' }}
+             {{- tool_call.name }}
+             {{- '", "arguments": ' }}
+             {{- tool_call.arguments | tojson }}
+             {{- '}\n</tool_call>' }}
+         {%- endfor %}
+         {{- '<|im_end|>\n' }}
+     {%- elif message.role == "tool" %}
+         {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+             {{- '<|im_start|>user' }}
+         {%- endif %}
+         {{- '\n<tool_response>\n' }}
+         {{- message.content }}
+         {{- '\n</tool_response>' }}
+         {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+             {{- '<|im_end|>\n' }}
+         {%- endif %}
+     {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+     {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
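The template above renders a message list (plus optional tool schemas) into Qwen's ChatML-style `<|im_start|>`/`<|im_end|>` format. A minimal sketch of exercising it through the tokenizer, which ships the same template:

```python
# Sketch: render the chat template above via the base Qwen tokenizer.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
messages = [
    {"role": "system", "content": "You are a networking tutor."},
    {"role": "user", "content": "What does ARP do?"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # ends with '<|im_start|>assistant\n' because add_generation_prompt=True
```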
checkpoint-1500/README.md ADDED
@@ -0,0 +1,209 @@
+ ---
+ base_model: Qwen/Qwen2.5-7B-Instruct
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:Qwen/Qwen2.5-7B-Instruct
+ - lora
+ - sft
+ - transformers
+ - trl
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+
+ ### Framework versions
+
+ - PEFT 0.19.1
checkpoint-1500/adapter_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "lora_ga_config": null,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.19.1",
+   "qalora_group_size": 16,
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "down_proj",
+     "k_proj",
+     "up_proj",
+     "gate_proj",
+     "q_proj",
+     "o_proj"
+   ],
+   "target_parameters": null,
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_bdlora": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
checkpoint-1500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d28fc138d3dffebe113f194603ad1661df4d1647602418576531f98b087c7cc0
+ size 80792880
checkpoint-1500/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
+ {%- if tools %}
+     {{- '<|im_start|>system\n' }}
+     {%- if messages[0]['role'] == 'system' %}
+         {{- messages[0]['content'] }}
+     {%- else %}
+         {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
+     {%- endif %}
+     {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+     {%- for tool in tools %}
+         {{- "\n" }}
+         {{- tool | tojson }}
+     {%- endfor %}
+     {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+     {%- if messages[0]['role'] == 'system' %}
+         {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+     {%- else %}
+         {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
+     {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+     {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+         {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+     {%- elif message.role == "assistant" %}
+         {{- '<|im_start|>' + message.role }}
+         {%- if message.content %}
+             {{- '\n' + message.content }}
+         {%- endif %}
+         {%- for tool_call in message.tool_calls %}
+             {%- if tool_call.function is defined %}
+                 {%- set tool_call = tool_call.function %}
+             {%- endif %}
+             {{- '\n<tool_call>\n{"name": "' }}
+             {{- tool_call.name }}
+             {{- '", "arguments": ' }}
+             {{- tool_call.arguments | tojson }}
+             {{- '}\n</tool_call>' }}
+         {%- endfor %}
+         {{- '<|im_end|>\n' }}
+     {%- elif message.role == "tool" %}
+         {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+             {{- '<|im_start|>user' }}
+         {%- endif %}
+         {{- '\n<tool_response>\n' }}
+         {{- message.content }}
+         {{- '\n</tool_response>' }}
+         {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+             {{- '<|im_end|>\n' }}
+         {%- endif %}
+     {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+     {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67e3ebe9bce90372e1a800b5f658ef47b9f65684cc0ab5ed06755a76bf5b77b4
+ size 161810747
checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d860ff33925c1b772004b2d307980bb90150429efe82bfdbc121257969ecd5aa
+ size 14645
checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8fb94d7208a78f2b9f0eaad152a2d3bdf7f4dc3b4166cc8ccf715eabb1c1337f
+ size 1465
checkpoint-1500/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
+ size 11421892
checkpoint-1500/tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "is_local": false,
+   "model_max_length": 131072,
+   "pad_token": "<|im_end|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
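Note that this config sets pad_token to the same string as eos_token (`<|im_end|>`), a common choice when fine-tuning Qwen2-family models. A quick hedged check against a local copy of the checkpoint (the path is hypothetical):

```python
# Sketch: confirm the eos/pad setup recorded in tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-1500")  # hypothetical local path
print(tok.eos_token, tok.pad_token)  # both should print '<|im_end|>' per this config
print(tok.model_max_length)          # 131072
```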
checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,634 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.8393662784597629,
+   "eval_steps": 500,
+   "global_step": 1500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "entropy": 1.6571732765436173,
+       "epoch": 0.013989437974329381,
+       "grad_norm": 0.0257568359375,
+       "learning_rate": 8.888888888888889e-05,
+       "loss": 1.9518862915039064,
+       "mean_token_accuracy": 0.5733682353049516,
+       "num_tokens": 294540.0,
+       "step": 25
+     },
+     {
+       "entropy": 1.7126372032612562,
+       "epoch": 0.027978875948658763,
+       "grad_norm": 0.01416015625,
+       "learning_rate": 0.0001814814814814815,
+       "loss": 1.6628182983398438,
+       "mean_token_accuracy": 0.6153687073290348,
+       "num_tokens": 593048.0,
+       "step": 50
+     },
+     {
+       "entropy": 1.6262741280719637,
+       "epoch": 0.041968313922988144,
+       "grad_norm": 0.01806640625,
+       "learning_rate": 0.00019993435766282977,
+       "loss": 1.584449005126953,
+       "mean_token_accuracy": 0.6257701878249645,
+       "num_tokens": 891346.0,
+       "step": 75
+     },
+     {
+       "entropy": 1.58288999453187,
+       "epoch": 0.055957751897317526,
+       "grad_norm": 0.016357421875,
+       "learning_rate": 0.00019966783335941717,
+       "loss": 1.5507936096191406,
+       "mean_token_accuracy": 0.6367124810814857,
+       "num_tokens": 1184611.0,
+       "step": 100
+     },
+     {
+       "entropy": 1.5433334041386844,
+       "epoch": 0.0699471898716469,
+       "grad_norm": 0.0189208984375,
+       "learning_rate": 0.00019919687071661834,
+       "loss": 1.512821044921875,
+       "mean_token_accuracy": 0.6423260240256786,
+       "num_tokens": 1480125.0,
+       "step": 125
+     },
+     {
+       "entropy": 1.560064207613468,
+       "epoch": 0.08393662784597629,
+       "grad_norm": 0.017822265625,
+       "learning_rate": 0.00019852243577149215,
+       "loss": 1.520980224609375,
+       "mean_token_accuracy": 0.6398584572970867,
+       "num_tokens": 1770905.0,
+       "step": 150
+     },
+     {
+       "entropy": 1.4850163259357214,
+       "epoch": 0.09792606582030566,
+       "grad_norm": 0.0235595703125,
+       "learning_rate": 0.00019764591192282993,
+       "loss": 1.460587158203125,
+       "mean_token_accuracy": 0.6578983479738235,
+       "num_tokens": 2061126.0,
+       "step": 175
+     },
+     {
+       "entropy": 1.4848217015340923,
+       "epoch": 0.11191550379463505,
+       "grad_norm": 0.03662109375,
+       "learning_rate": 0.00019656909709353236,
+       "loss": 1.4420854187011718,
+       "mean_token_accuracy": 0.6592926564067603,
+       "num_tokens": 2352476.0,
+       "step": 200
+     },
+     {
+       "entropy": 1.4575298309139908,
+       "epoch": 0.12590494176896444,
+       "grad_norm": 0.0196533203125,
+       "learning_rate": 0.00019529420004271567,
+       "loss": 1.4135537719726563,
+       "mean_token_accuracy": 0.6653153513371944,
+       "num_tokens": 2647293.0,
+       "step": 225
+     },
+     {
+       "entropy": 1.440113589912653,
+       "epoch": 0.1398943797432938,
+       "grad_norm": 0.0238037109375,
+       "learning_rate": 0.00019382383583511206,
+       "loss": 1.416180877685547,
+       "mean_token_accuracy": 0.6659375876933337,
+       "num_tokens": 2941859.0,
+       "step": 250
+     },
+     {
+       "entropy": 1.4387904461845755,
+       "epoch": 0.1538838177176232,
+       "grad_norm": 0.0263671875,
+       "learning_rate": 0.00019216102047705746,
+       "loss": 1.38541015625,
+       "mean_token_accuracy": 0.6683934908360243,
+       "num_tokens": 3237763.0,
+       "step": 275
+     },
+     {
+       "entropy": 1.3664818671159447,
+       "epoch": 0.16787325569195258,
+       "grad_norm": 0.02001953125,
+       "learning_rate": 0.00019030916473006888,
+       "loss": 1.3284611511230469,
+       "mean_token_accuracy": 0.6843961907178163,
+       "num_tokens": 3541854.0,
+       "step": 300
+     },
+     {
+       "entropy": 1.3906436143629253,
+       "epoch": 0.18186269366628197,
+       "grad_norm": 0.0296630859375,
+       "learning_rate": 0.00018827206711470137,
+       "loss": 1.3411039733886718,
+       "mean_token_accuracy": 0.6773389867693186,
+       "num_tokens": 3832012.0,
+       "step": 325
+     },
+     {
+       "entropy": 1.3563544462341814,
+       "epoch": 0.19585213164061133,
+       "grad_norm": 0.0233154296875,
+       "learning_rate": 0.000186053906119035,
+       "loss": 1.3131784057617188,
+       "mean_token_accuracy": 0.686054406017065,
+       "num_tokens": 4129238.0,
+       "step": 350
+     },
+     {
+       "entropy": 1.3752087260596455,
+       "epoch": 0.20984156961494072,
+       "grad_norm": 0.0272216796875,
+       "learning_rate": 0.00018365923162777336,
+       "loss": 1.338323974609375,
+       "mean_token_accuracy": 0.6795472690463066,
+       "num_tokens": 4414939.0,
+       "step": 375
+     },
+     {
+       "entropy": 1.3106766785029322,
+       "epoch": 0.2238310075892701,
+       "grad_norm": 0.027099609375,
+       "learning_rate": 0.0001810929555895348,
+       "loss": 1.2676075744628905,
+       "mean_token_accuracy": 0.6990727451443672,
+       "num_tokens": 4711065.0,
+       "step": 400
+     },
+     {
+       "entropy": 1.2741564935538918,
+       "epoch": 0.2378204455635995,
+       "grad_norm": 0.0341796875,
+       "learning_rate": 0.00017836034194147922,
+       "loss": 1.231330337524414,
+       "mean_token_accuracy": 0.7052560625225306,
+       "num_tokens": 5006515.0,
+       "step": 425
+     },
+     {
+       "entropy": 1.255356255499646,
+       "epoch": 0.2518098835379289,
+       "grad_norm": 0.025390625,
+       "learning_rate": 0.000175466995811937,
+       "loss": 1.2256485748291015,
+       "mean_token_accuracy": 0.709921350106597,
+       "num_tokens": 5298743.0,
+       "step": 450
+     },
+     {
+       "entropy": 1.2404796612542122,
+       "epoch": 0.26579932151225827,
+       "grad_norm": 0.029296875,
+       "learning_rate": 0.00017241885202318787,
+       "loss": 1.199592056274414,
+       "mean_token_accuracy": 0.7124969442933797,
+       "num_tokens": 5587292.0,
+       "step": 475
+     },
+     {
+       "entropy": 1.2135492172930389,
+       "epoch": 0.2797887594865876,
+       "grad_norm": 0.0341796875,
+       "learning_rate": 0.00016922216291797248,
+       "loss": 1.1718869781494141,
+       "mean_token_accuracy": 0.7194690197706223,
+       "num_tokens": 5878179.0,
+       "step": 500
+     },
+     {
+       "entropy": 1.1519292463734745,
+       "epoch": 0.293778197460917,
+       "grad_norm": 0.044921875,
+       "learning_rate": 0.0001658834855347071,
+       "loss": 1.1038724517822265,
+       "mean_token_accuracy": 0.7360028678923846,
+       "num_tokens": 6174073.0,
+       "step": 525
+     },
+     {
+       "entropy": 1.2023496214020997,
+       "epoch": 0.3077676354352464,
+       "grad_norm": 0.035400390625,
+       "learning_rate": 0.00016240966815770754,
+       "loss": 1.1433139801025392,
+       "mean_token_accuracy": 0.7237949234992266,
+       "num_tokens": 6471377.0,
+       "step": 550
+     },
+     {
+       "entropy": 1.2287814150052145,
+       "epoch": 0.32175707340957577,
+       "grad_norm": 0.0262451171875,
+       "learning_rate": 0.00015880783627001026,
+       "loss": 1.196635513305664,
+       "mean_token_accuracy": 0.7167265053838492,
+       "num_tokens": 6762946.0,
+       "step": 575
+     },
+     {
+       "entropy": 1.1513058760762214,
+       "epoch": 0.33574651138390516,
+       "grad_norm": 0.03759765625,
+       "learning_rate": 0.0001550853779376045,
+       "loss": 1.104804229736328,
+       "mean_token_accuracy": 0.7350956991314888,
+       "num_tokens": 7058166.0,
+       "step": 600
+     },
+     {
+       "entropy": 1.1233506935788318,
+       "epoch": 0.34973594935823454,
+       "grad_norm": 0.038818359375,
+       "learning_rate": 0.00015124992865505523,
+       "loss": 1.0836825561523438,
+       "mean_token_accuracy": 0.7427010469138622,
+       "num_tokens": 7351236.0,
+       "step": 625
+     },
+     {
+       "entropy": 1.1373229866661132,
+       "epoch": 0.36372538733256393,
+       "grad_norm": 0.03466796875,
+       "learning_rate": 0.00014730935568360102,
+       "loss": 1.0919375610351563,
+       "mean_token_accuracy": 0.7399685283005237,
+       "num_tokens": 7642275.0,
+       "step": 650
+     },
+     {
+       "entropy": 1.1448556556599214,
+       "epoch": 0.3777148253068933,
+       "grad_norm": 0.033447265625,
+       "learning_rate": 0.0001432717419138532,
+       "loss": 1.0814331817626952,
+       "mean_token_accuracy": 0.7378807676583529,
+       "num_tokens": 7935222.0,
+       "step": 675
+     },
+     {
+       "entropy": 1.040786258596927,
+       "epoch": 0.39170426328122265,
+       "grad_norm": 0.0311279296875,
+       "learning_rate": 0.0001391453692861967,
+       "loss": 0.9921630096435546,
+       "mean_token_accuracy": 0.7638325883448124,
+       "num_tokens": 8234237.0,
+       "step": 700
+     },
+     {
+       "entropy": 1.1662991707585753,
+       "epoch": 0.40569370125555204,
+       "grad_norm": 0.0302734375,
+       "learning_rate": 0.0001349387018029003,
+       "loss": 1.118931884765625,
+       "mean_token_accuracy": 0.732994829416275,
+       "num_tokens": 8529663.0,
+       "step": 725
+     },
+     {
+       "entropy": 1.049061678093858,
+       "epoch": 0.41968313922988143,
+       "grad_norm": 0.041015625,
+       "learning_rate": 0.00013066036816678273,
+       "loss": 0.9903451538085938,
+       "mean_token_accuracy": 0.7613946237415076,
+       "num_tokens": 8817967.0,
+       "step": 750
+     },
+     {
+       "entropy": 1.1110075389547274,
+       "epoch": 0.4336725772042108,
+       "grad_norm": 0.0361328125,
+       "learning_rate": 0.0001263191440820448,
+       "loss": 1.0462547302246095,
+       "mean_token_accuracy": 0.7477244459837675,
+       "num_tokens": 9107363.0,
+       "step": 775
+     },
+     {
+       "entropy": 1.024681367701851,
+       "epoch": 0.4476620151785402,
+       "grad_norm": 0.039306640625,
+       "learning_rate": 0.00012192393425357353,
+       "loss": 0.9708639526367188,
+       "mean_token_accuracy": 0.7660154252499342,
+       "num_tokens": 9398385.0,
+       "step": 800
+     },
+     {
+       "entropy": 1.0914598937472328,
+       "epoch": 0.4616514531528696,
+       "grad_norm": 0.03662109375,
+       "learning_rate": 0.00011748375412163984,
+       "loss": 1.0285856628417969,
+       "mean_token_accuracy": 0.7533510192483663,
+       "num_tokens": 9692995.0,
+       "step": 825
+     },
+     {
+       "entropy": 1.070437773577869,
+       "epoch": 0.475640891127199,
+       "grad_norm": 0.0308837890625,
+       "learning_rate": 0.00011300771136945658,
+       "loss": 1.002013931274414,
+       "mean_token_accuracy": 0.7574464529007673,
+       "num_tokens": 9985540.0,
+       "step": 850
+     },
+     {
+       "entropy": 1.0500496013974772,
+       "epoch": 0.48963032910152837,
+       "grad_norm": 0.033935546875,
+       "learning_rate": 0.00010850498724152798,
+       "loss": 1.0061278533935547,
+       "mean_token_accuracy": 0.7601391483098269,
+       "num_tokens": 10280858.0,
+       "step": 875
+     },
+     {
+       "entropy": 0.9829169751703739,
+       "epoch": 0.5036197670758578,
+       "grad_norm": 0.035888671875,
+       "learning_rate": 0.00010398481771111037,
+       "loss": 0.9166652679443359,
+       "mean_token_accuracy": 0.7795024861395359,
+       "num_tokens": 10571194.0,
+       "step": 900
+     },
+     {
+       "entropy": 0.9858249768940732,
+       "epoch": 0.5176092050501871,
+       "grad_norm": 0.038818359375,
+       "learning_rate": 9.94564745354137e-05,
+       "loss": 0.9250814819335937,
+       "mean_token_accuracy": 0.7783324559032917,
+       "num_tokens": 10861535.0,
+       "step": 925
+     },
+     {
+       "entropy": 0.9617049301974475,
+       "epoch": 0.5315986430245165,
+       "grad_norm": 0.0419921875,
+       "learning_rate": 9.492924623740271e-05,
+       "loss": 0.9093881988525391,
+       "mean_token_accuracy": 0.7818463468551635,
+       "num_tokens": 11157232.0,
+       "step": 950
+     },
+     {
+       "entropy": 0.9786588902026415,
+       "epoch": 0.5455880809988459,
+       "grad_norm": 0.031494140625,
+       "learning_rate": 9.04124190532087e-05,
+       "loss": 0.9290876007080078,
+       "mean_token_accuracy": 0.7784507688879967,
+       "num_tokens": 11451817.0,
+       "step": 975
+     },
+     {
+       "entropy": 0.9941231621522456,
+       "epoch": 0.5595775189731752,
+       "grad_norm": 0.03662109375,
+       "learning_rate": 8.591525788423168e-05,
+       "loss": 0.9281739807128906,
+       "mean_token_accuracy": 0.7773911864310503,
+       "num_tokens": 11746337.0,
+       "step": 1000
+     },
+     {
+       "entropy": 0.9537221440812573,
+       "epoch": 0.5735669569475046,
+       "grad_norm": 0.0303955078125,
+       "learning_rate": 8.144698729300455e-05,
+       "loss": 0.8921097564697266,
+       "mean_token_accuracy": 0.7855567722022534,
+       "num_tokens": 12037412.0,
+       "step": 1025
+     },
+     {
+       "entropy": 0.9557448910363019,
+       "epoch": 0.587556394921834,
+       "grad_norm": 0.029052734375,
+       "learning_rate": 7.701677258179996e-05,
+       "loss": 0.9018702697753906,
+       "mean_token_accuracy": 0.78552112005651,
+       "num_tokens": 12330073.0,
+       "step": 1050
+     },
+     {
+       "entropy": 0.9831135825067758,
+       "epoch": 0.6015458328961634,
+       "grad_norm": 0.031005859375,
+       "learning_rate": 7.263370099279172e-05,
+       "loss": 0.9220442199707031,
+       "mean_token_accuracy": 0.7789029122143983,
+       "num_tokens": 12623027.0,
+       "step": 1075
+     },
+     {
+       "entropy": 0.8838035979354754,
+       "epoch": 0.6155352708704928,
+       "grad_norm": 0.040283203125,
+       "learning_rate": 6.83067630683331e-05,
+       "loss": 0.8227420043945313,
+       "mean_token_accuracy": 0.8038330339640379,
+       "num_tokens": 12916473.0,
+       "step": 1100
+     },
+     {
+       "entropy": 0.883046350381337,
+       "epoch": 0.6295247088448221,
+       "grad_norm": 0.037353515625,
+       "learning_rate": 6.404483420958494e-05,
+       "loss": 0.8318240356445312,
+       "mean_token_accuracy": 0.8026394218951464,
+       "num_tokens": 13210350.0,
+       "step": 1125
+     },
+     {
+       "entropy": 0.9277670627878979,
+       "epoch": 0.6435141468191515,
+       "grad_norm": 0.02783203125,
+       "learning_rate": 5.9856656471321636e-05,
+       "loss": 0.8780873870849609,
+       "mean_token_accuracy": 0.7957132039964199,
+       "num_tokens": 13501483.0,
+       "step": 1150
+     },
+     {
+       "entropy": 0.9232714768499136,
+       "epoch": 0.6575035847934809,
+       "grad_norm": 0.0255126953125,
+       "learning_rate": 5.575082063025619e-05,
+       "loss": 0.8550299835205079,
+       "mean_token_accuracy": 0.7936624947935342,
+       "num_tokens": 13797592.0,
+       "step": 1175
+     },
+     {
+       "entropy": 0.899801154313609,
+       "epoch": 0.6714930227678103,
+       "grad_norm": 0.03369140625,
+       "learning_rate": 5.173574856366683e-05,
+       "loss": 0.8404230499267578,
+       "mean_token_accuracy": 0.7989044986665249,
+       "num_tokens": 14085126.0,
+       "step": 1200
+     },
+     {
+       "entropy": 0.8731577204307541,
+       "epoch": 0.6854824607421397,
+       "grad_norm": 0.033203125,
+       "learning_rate": 4.7819675974469356e-05,
+       "loss": 0.8161344146728515,
+       "mean_token_accuracy": 0.8059349462389946,
+       "num_tokens": 14383100.0,
+       "step": 1225
+     },
+     {
+       "entropy": 0.9630948188621551,
+       "epoch": 0.6994718987164691,
+       "grad_norm": 0.033935546875,
+       "learning_rate": 4.401063549816984e-05,
+       "loss": 0.91357177734375,
+       "mean_token_accuracy": 0.7841827914863825,
+       "num_tokens": 14682819.0,
+       "step": 1250
+     },
+     {
+       "entropy": 0.9606740972073748,
+       "epoch": 0.7134613366907985,
+       "grad_norm": 0.02783203125,
+       "learning_rate": 4.031644022634831e-05,
+       "loss": 0.896214599609375,
+       "mean_token_accuracy": 0.7852722837030888,
+       "num_tokens": 14972956.0,
+       "step": 1275
+     },
+     {
+       "entropy": 0.9392211610358209,
+       "epoch": 0.7274507746651279,
+       "grad_norm": 0.033447265625,
+       "learning_rate": 3.674466768047078e-05,
+       "loss": 0.8826549530029297,
+       "mean_token_accuracy": 0.7912480696290731,
+       "num_tokens": 15271172.0,
+       "step": 1300
+     },
+     {
+       "entropy": 0.9369741760706529,
+       "epoch": 0.7414402126394573,
+       "grad_norm": 0.0228271484375,
+       "learning_rate": 3.330264426890114e-05,
+       "loss": 0.8716287994384766,
+       "mean_token_accuracy": 0.7906265539675951,
+       "num_tokens": 15567414.0,
+       "step": 1325
+     },
+     {
+       "entropy": 0.8475008888402954,
+       "epoch": 0.7554296506137866,
+       "grad_norm": 0.02734375,
+       "learning_rate": 2.9997430258996208e-05,
+       "loss": 0.7837786865234375,
+       "mean_token_accuracy": 0.8113828019797802,
+       "num_tokens": 15862493.0,
+       "step": 1350
+     },
+     {
+       "entropy": 0.8544112277310342,
+       "epoch": 0.7694190885881159,
+       "grad_norm": 0.0400390625,
+       "learning_rate": 2.6835805295107897e-05,
+       "loss": 0.7910999298095703,
+       "mean_token_accuracy": 0.8121292810887099,
+       "num_tokens": 16154298.0,
+       "step": 1375
+     },
+     {
+       "entropy": 0.8836554439552128,
+       "epoch": 0.7834085265624453,
+       "grad_norm": 0.03271484375,
+       "learning_rate": 2.382425449219854e-05,
+       "loss": 0.8322444152832031,
+       "mean_token_accuracy": 0.8058089179545641,
+       "num_tokens": 16450244.0,
+       "step": 1400
+     },
+     {
+       "entropy": 0.9750821837875993,
+       "epoch": 0.7973979645367747,
+       "grad_norm": 0.0279541015625,
+       "learning_rate": 2.0968955133593805e-05,
+       "loss": 0.9110565948486328,
+       "mean_token_accuracy": 0.7827954424917698,
+       "num_tokens": 16742734.0,
+       "step": 1425
+     },
+     {
+       "entropy": 0.8045650019776076,
+       "epoch": 0.8113874025111041,
+       "grad_norm": 0.037841796875,
+       "learning_rate": 1.8275764000159222e-05,
+       "loss": 0.7386186218261719,
+       "mean_token_accuracy": 0.8229204141348601,
+       "num_tokens": 17040960.0,
+       "step": 1450
+     },
+     {
+       "entropy": 0.9300361970160157,
+       "epoch": 0.8253768404854335,
+       "grad_norm": 0.0303955078125,
+       "learning_rate": 1.5750205356889937e-05,
+       "loss": 0.8793846130371094,
+       "mean_token_accuracy": 0.7934108532965183,
+       "num_tokens": 17340398.0,
+       "step": 1475
+     },
+     {
+       "entropy": 0.9426807796442881,
+       "epoch": 0.8393662784597629,
+       "grad_norm": 0.032958984375,
+       "learning_rate": 1.339745962155613e-05,
+       "loss": 0.8905538940429687,
+       "mean_token_accuracy": 0.7892987384647131,
+       "num_tokens": 17629025.0,
+       "step": 1500
+     }
+   ],
+   "logging_steps": 25,
+   "max_steps": 1788,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 7.521588501358848e+17,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
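trainer_state.json is plain JSON, so the logged SFT curve is easy to summarize; by the last entry above, loss has fallen from about 1.95 to 0.89 and mean token accuracy has risen from about 0.57 to 0.79. A minimal sketch (the local path is hypothetical):

```python
# Sketch: summarize the loss curve recorded in trainer_state.json.
import json

with open("checkpoint-1500/trainer_state.json") as f:  # hypothetical local path
    state = json.load(f)

history = state["log_history"]
print(f"entries: {len(history)}, global step: {state['global_step']}")
first, last = history[0], history[-1]
print(f"loss: {first['loss']:.3f} -> {last['loss']:.3f}")
print(f"mean token accuracy: {first['mean_token_accuracy']:.3f} -> {last['mean_token_accuracy']:.3f}")
```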
checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b0ea67ee3810c6e10021bd35d99633fb742a8db898d131b220e180054e0dcd7
+ size 5649
checkpoint-1788/README.md ADDED
@@ -0,0 +1,209 @@
+ ---
+ base_model: Qwen/Qwen2.5-7B-Instruct
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:Qwen/Qwen2.5-7B-Instruct
+ - lora
+ - sft
+ - transformers
+ - trl
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+
+ ### Framework versions
+
+ - PEFT 0.19.1
checkpoint-1788/adapter_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "lora_ga_config": null,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.19.1",
+   "qalora_group_size": 16,
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "down_proj",
+     "k_proj",
+     "up_proj",
+     "gate_proj",
+     "q_proj",
+     "o_proj"
+   ],
+   "target_parameters": null,
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_bdlora": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
checkpoint-1788/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71ce8f09c288cc8c8b7ac6193695f7f42b8be0ba400b084b9163a8060c8df7bb
+ size 80792880
checkpoint-1788/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
+ {%- if tools %}
+     {{- '<|im_start|>system\n' }}
+     {%- if messages[0]['role'] == 'system' %}
+         {{- messages[0]['content'] }}
+     {%- else %}
+         {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
+     {%- endif %}
+     {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+     {%- for tool in tools %}
+         {{- "\n" }}
+         {{- tool | tojson }}
+     {%- endfor %}
+     {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+     {%- if messages[0]['role'] == 'system' %}
+         {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+     {%- else %}
+         {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
+     {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+     {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+         {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+     {%- elif message.role == "assistant" %}
+         {{- '<|im_start|>' + message.role }}
+         {%- if message.content %}
+             {{- '\n' + message.content }}
+         {%- endif %}
+         {%- for tool_call in message.tool_calls %}
+             {%- if tool_call.function is defined %}
+                 {%- set tool_call = tool_call.function %}
+             {%- endif %}
+             {{- '\n<tool_call>\n{"name": "' }}
+             {{- tool_call.name }}
+             {{- '", "arguments": ' }}
+             {{- tool_call.arguments | tojson }}
+             {{- '}\n</tool_call>' }}
+         {%- endfor %}
+         {{- '<|im_end|>\n' }}
+     {%- elif message.role == "tool" %}
+         {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+             {{- '<|im_start|>user' }}
+         {%- endif %}
+         {{- '\n<tool_response>\n' }}
+         {{- message.content }}
+         {{- '\n</tool_response>' }}
+         {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+             {{- '<|im_end|>\n' }}
+         {%- endif %}
+     {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+     {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
checkpoint-1788/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb8661ae01cc3ee4014461498f74d402aa6d9cc3db91e59646e1f8d67b309f8b
+ size 161810747
checkpoint-1788/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1dcad7f60928d68ad1f7bdc772f984932b0ff9aea4bcae90a976891265970458
+ size 14645
checkpoint-1788/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15bf83816ae439c3a890f09427b1f466dd2c99c9610a037a13b4a852802cadfe
+ size 1465
checkpoint-1788/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
+ size 11421892
checkpoint-1788/tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "is_local": false,
+   "model_max_length": 131072,
+   "pad_token": "<|im_end|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
checkpoint-1788/trainer_state.json ADDED
@@ -0,0 +1,744 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 1788,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "entropy": 1.6571732765436173,
+ "epoch": 0.013989437974329381,
+ "grad_norm": 0.0257568359375,
+ "learning_rate": 8.888888888888889e-05,
+ "loss": 1.9518862915039064,
+ "mean_token_accuracy": 0.5733682353049516,
+ "num_tokens": 294540.0,
+ "step": 25
+ },
+ {
+ "entropy": 1.7126372032612562,
+ "epoch": 0.027978875948658763,
+ "grad_norm": 0.01416015625,
+ "learning_rate": 0.0001814814814814815,
+ "loss": 1.6628182983398438,
+ "mean_token_accuracy": 0.6153687073290348,
+ "num_tokens": 593048.0,
+ "step": 50
+ },
+ {
+ "entropy": 1.6262741280719637,
+ "epoch": 0.041968313922988144,
+ "grad_norm": 0.01806640625,
+ "learning_rate": 0.00019993435766282977,
+ "loss": 1.584449005126953,
+ "mean_token_accuracy": 0.6257701878249645,
+ "num_tokens": 891346.0,
+ "step": 75
+ },
+ {
+ "entropy": 1.58288999453187,
+ "epoch": 0.055957751897317526,
+ "grad_norm": 0.016357421875,
+ "learning_rate": 0.00019966783335941717,
+ "loss": 1.5507936096191406,
+ "mean_token_accuracy": 0.6367124810814857,
+ "num_tokens": 1184611.0,
+ "step": 100
+ },
+ {
+ "entropy": 1.5433334041386844,
+ "epoch": 0.0699471898716469,
+ "grad_norm": 0.0189208984375,
+ "learning_rate": 0.00019919687071661834,
+ "loss": 1.512821044921875,
+ "mean_token_accuracy": 0.6423260240256786,
+ "num_tokens": 1480125.0,
+ "step": 125
+ },
+ {
+ "entropy": 1.560064207613468,
+ "epoch": 0.08393662784597629,
+ "grad_norm": 0.017822265625,
+ "learning_rate": 0.00019852243577149215,
+ "loss": 1.520980224609375,
+ "mean_token_accuracy": 0.6398584572970867,
+ "num_tokens": 1770905.0,
+ "step": 150
+ },
+ {
+ "entropy": 1.4850163259357214,
+ "epoch": 0.09792606582030566,
+ "grad_norm": 0.0235595703125,
+ "learning_rate": 0.00019764591192282993,
+ "loss": 1.460587158203125,
+ "mean_token_accuracy": 0.6578983479738235,
+ "num_tokens": 2061126.0,
+ "step": 175
+ },
+ {
+ "entropy": 1.4848217015340923,
+ "epoch": 0.11191550379463505,
+ "grad_norm": 0.03662109375,
+ "learning_rate": 0.00019656909709353236,
+ "loss": 1.4420854187011718,
+ "mean_token_accuracy": 0.6592926564067603,
+ "num_tokens": 2352476.0,
+ "step": 200
+ },
+ {
+ "entropy": 1.4575298309139908,
+ "epoch": 0.12590494176896444,
+ "grad_norm": 0.0196533203125,
+ "learning_rate": 0.00019529420004271567,
+ "loss": 1.4135537719726563,
+ "mean_token_accuracy": 0.6653153513371944,
+ "num_tokens": 2647293.0,
+ "step": 225
+ },
+ {
+ "entropy": 1.440113589912653,
+ "epoch": 0.1398943797432938,
+ "grad_norm": 0.0238037109375,
+ "learning_rate": 0.00019382383583511206,
+ "loss": 1.416180877685547,
+ "mean_token_accuracy": 0.6659375876933337,
+ "num_tokens": 2941859.0,
+ "step": 250
+ },
+ {
+ "entropy": 1.4387904461845755,
+ "epoch": 0.1538838177176232,
+ "grad_norm": 0.0263671875,
+ "learning_rate": 0.00019216102047705746,
+ "loss": 1.38541015625,
+ "mean_token_accuracy": 0.6683934908360243,
+ "num_tokens": 3237763.0,
+ "step": 275
+ },
+ {
+ "entropy": 1.3664818671159447,
+ "epoch": 0.16787325569195258,
+ "grad_norm": 0.02001953125,
+ "learning_rate": 0.00019030916473006888,
+ "loss": 1.3284611511230469,
+ "mean_token_accuracy": 0.6843961907178163,
+ "num_tokens": 3541854.0,
+ "step": 300
+ },
+ {
+ "entropy": 1.3906436143629253,
+ "epoch": 0.18186269366628197,
+ "grad_norm": 0.0296630859375,
+ "learning_rate": 0.00018827206711470137,
+ "loss": 1.3411039733886718,
+ "mean_token_accuracy": 0.6773389867693186,
+ "num_tokens": 3832012.0,
+ "step": 325
+ },
+ {
+ "entropy": 1.3563544462341814,
+ "epoch": 0.19585213164061133,
+ "grad_norm": 0.0233154296875,
+ "learning_rate": 0.000186053906119035,
+ "loss": 1.3131784057617188,
+ "mean_token_accuracy": 0.686054406017065,
+ "num_tokens": 4129238.0,
+ "step": 350
+ },
+ {
+ "entropy": 1.3752087260596455,
+ "epoch": 0.20984156961494072,
+ "grad_norm": 0.0272216796875,
+ "learning_rate": 0.00018365923162777336,
+ "loss": 1.338323974609375,
+ "mean_token_accuracy": 0.6795472690463066,
+ "num_tokens": 4414939.0,
+ "step": 375
+ },
+ {
+ "entropy": 1.3106766785029322,
+ "epoch": 0.2238310075892701,
+ "grad_norm": 0.027099609375,
+ "learning_rate": 0.0001810929555895348,
+ "loss": 1.2676075744628905,
+ "mean_token_accuracy": 0.6990727451443672,
+ "num_tokens": 4711065.0,
+ "step": 400
+ },
+ {
+ "entropy": 1.2741564935538918,
+ "epoch": 0.2378204455635995,
+ "grad_norm": 0.0341796875,
+ "learning_rate": 0.00017836034194147922,
+ "loss": 1.231330337524414,
+ "mean_token_accuracy": 0.7052560625225306,
+ "num_tokens": 5006515.0,
+ "step": 425
+ },
+ {
+ "entropy": 1.255356255499646,
+ "epoch": 0.2518098835379289,
+ "grad_norm": 0.025390625,
+ "learning_rate": 0.000175466995811937,
+ "loss": 1.2256485748291015,
+ "mean_token_accuracy": 0.709921350106597,
+ "num_tokens": 5298743.0,
+ "step": 450
+ },
+ {
+ "entropy": 1.2404796612542122,
+ "epoch": 0.26579932151225827,
+ "grad_norm": 0.029296875,
+ "learning_rate": 0.00017241885202318787,
+ "loss": 1.199592056274414,
+ "mean_token_accuracy": 0.7124969442933797,
+ "num_tokens": 5587292.0,
+ "step": 475
+ },
+ {
+ "entropy": 1.2135492172930389,
+ "epoch": 0.2797887594865876,
+ "grad_norm": 0.0341796875,
+ "learning_rate": 0.00016922216291797248,
+ "loss": 1.1718869781494141,
+ "mean_token_accuracy": 0.7194690197706223,
+ "num_tokens": 5878179.0,
+ "step": 500
+ },
+ {
+ "entropy": 1.1519292463734745,
+ "epoch": 0.293778197460917,
+ "grad_norm": 0.044921875,
+ "learning_rate": 0.0001658834855347071,
+ "loss": 1.1038724517822265,
+ "mean_token_accuracy": 0.7360028678923846,
+ "num_tokens": 6174073.0,
+ "step": 525
+ },
+ {
+ "entropy": 1.2023496214020997,
+ "epoch": 0.3077676354352464,
+ "grad_norm": 0.035400390625,
+ "learning_rate": 0.00016240966815770754,
+ "loss": 1.1433139801025392,
+ "mean_token_accuracy": 0.7237949234992266,
+ "num_tokens": 6471377.0,
+ "step": 550
+ },
+ {
+ "entropy": 1.2287814150052145,
+ "epoch": 0.32175707340957577,
+ "grad_norm": 0.0262451171875,
+ "learning_rate": 0.00015880783627001026,
+ "loss": 1.196635513305664,
+ "mean_token_accuracy": 0.7167265053838492,
+ "num_tokens": 6762946.0,
+ "step": 575
+ },
+ {
+ "entropy": 1.1513058760762214,
+ "epoch": 0.33574651138390516,
+ "grad_norm": 0.03759765625,
+ "learning_rate": 0.0001550853779376045,
+ "loss": 1.104804229736328,
+ "mean_token_accuracy": 0.7350956991314888,
+ "num_tokens": 7058166.0,
+ "step": 600
+ },
+ {
+ "entropy": 1.1233506935788318,
+ "epoch": 0.34973594935823454,
+ "grad_norm": 0.038818359375,
+ "learning_rate": 0.00015124992865505523,
+ "loss": 1.0836825561523438,
+ "mean_token_accuracy": 0.7427010469138622,
+ "num_tokens": 7351236.0,
+ "step": 625
+ },
+ {
+ "entropy": 1.1373229866661132,
+ "epoch": 0.36372538733256393,
+ "grad_norm": 0.03466796875,
+ "learning_rate": 0.00014730935568360102,
+ "loss": 1.0919375610351563,
+ "mean_token_accuracy": 0.7399685283005237,
+ "num_tokens": 7642275.0,
+ "step": 650
+ },
+ {
+ "entropy": 1.1448556556599214,
+ "epoch": 0.3777148253068933,
+ "grad_norm": 0.033447265625,
+ "learning_rate": 0.0001432717419138532,
+ "loss": 1.0814331817626952,
+ "mean_token_accuracy": 0.7378807676583529,
+ "num_tokens": 7935222.0,
+ "step": 675
+ },
+ {
+ "entropy": 1.040786258596927,
+ "epoch": 0.39170426328122265,
+ "grad_norm": 0.0311279296875,
+ "learning_rate": 0.0001391453692861967,
+ "loss": 0.9921630096435546,
+ "mean_token_accuracy": 0.7638325883448124,
+ "num_tokens": 8234237.0,
+ "step": 700
+ },
+ {
+ "entropy": 1.1662991707585753,
+ "epoch": 0.40569370125555204,
+ "grad_norm": 0.0302734375,
+ "learning_rate": 0.0001349387018029003,
+ "loss": 1.118931884765625,
+ "mean_token_accuracy": 0.732994829416275,
+ "num_tokens": 8529663.0,
+ "step": 725
+ },
+ {
+ "entropy": 1.049061678093858,
+ "epoch": 0.41968313922988143,
+ "grad_norm": 0.041015625,
+ "learning_rate": 0.00013066036816678273,
+ "loss": 0.9903451538085938,
+ "mean_token_accuracy": 0.7613946237415076,
+ "num_tokens": 8817967.0,
+ "step": 750
+ },
+ {
+ "entropy": 1.1110075389547274,
+ "epoch": 0.4336725772042108,
+ "grad_norm": 0.0361328125,
+ "learning_rate": 0.0001263191440820448,
+ "loss": 1.0462547302246095,
+ "mean_token_accuracy": 0.7477244459837675,
+ "num_tokens": 9107363.0,
+ "step": 775
+ },
+ {
+ "entropy": 1.024681367701851,
+ "epoch": 0.4476620151785402,
+ "grad_norm": 0.039306640625,
+ "learning_rate": 0.00012192393425357353,
+ "loss": 0.9708639526367188,
+ "mean_token_accuracy": 0.7660154252499342,
+ "num_tokens": 9398385.0,
+ "step": 800
+ },
+ {
+ "entropy": 1.0914598937472328,
+ "epoch": 0.4616514531528696,
+ "grad_norm": 0.03662109375,
+ "learning_rate": 0.00011748375412163984,
+ "loss": 1.0285856628417969,
+ "mean_token_accuracy": 0.7533510192483663,
+ "num_tokens": 9692995.0,
+ "step": 825
+ },
+ {
+ "entropy": 1.070437773577869,
+ "epoch": 0.475640891127199,
+ "grad_norm": 0.0308837890625,
+ "learning_rate": 0.00011300771136945658,
+ "loss": 1.002013931274414,
+ "mean_token_accuracy": 0.7574464529007673,
+ "num_tokens": 9985540.0,
+ "step": 850
+ },
+ {
+ "entropy": 1.0500496013974772,
+ "epoch": 0.48963032910152837,
+ "grad_norm": 0.033935546875,
+ "learning_rate": 0.00010850498724152798,
+ "loss": 1.0061278533935547,
+ "mean_token_accuracy": 0.7601391483098269,
+ "num_tokens": 10280858.0,
+ "step": 875
+ },
+ {
+ "entropy": 0.9829169751703739,
+ "epoch": 0.5036197670758578,
+ "grad_norm": 0.035888671875,
+ "learning_rate": 0.00010398481771111037,
+ "loss": 0.9166652679443359,
+ "mean_token_accuracy": 0.7795024861395359,
+ "num_tokens": 10571194.0,
+ "step": 900
+ },
+ {
+ "entropy": 0.9858249768940732,
+ "epoch": 0.5176092050501871,
+ "grad_norm": 0.038818359375,
+ "learning_rate": 9.94564745354137e-05,
+ "loss": 0.9250814819335937,
+ "mean_token_accuracy": 0.7783324559032917,
+ "num_tokens": 10861535.0,
+ "step": 925
+ },
+ {
+ "entropy": 0.9617049301974475,
+ "epoch": 0.5315986430245165,
+ "grad_norm": 0.0419921875,
+ "learning_rate": 9.492924623740271e-05,
+ "loss": 0.9093881988525391,
+ "mean_token_accuracy": 0.7818463468551635,
+ "num_tokens": 11157232.0,
+ "step": 950
+ },
+ {
+ "entropy": 0.9786588902026415,
+ "epoch": 0.5455880809988459,
+ "grad_norm": 0.031494140625,
+ "learning_rate": 9.04124190532087e-05,
+ "loss": 0.9290876007080078,
+ "mean_token_accuracy": 0.7784507688879967,
+ "num_tokens": 11451817.0,
+ "step": 975
+ },
+ {
+ "entropy": 0.9941231621522456,
+ "epoch": 0.5595775189731752,
+ "grad_norm": 0.03662109375,
+ "learning_rate": 8.591525788423168e-05,
+ "loss": 0.9281739807128906,
+ "mean_token_accuracy": 0.7773911864310503,
+ "num_tokens": 11746337.0,
+ "step": 1000
+ },
+ {
+ "entropy": 0.9537221440812573,
+ "epoch": 0.5735669569475046,
+ "grad_norm": 0.0303955078125,
+ "learning_rate": 8.144698729300455e-05,
+ "loss": 0.8921097564697266,
+ "mean_token_accuracy": 0.7855567722022534,
+ "num_tokens": 12037412.0,
+ "step": 1025
+ },
+ {
+ "entropy": 0.9557448910363019,
+ "epoch": 0.587556394921834,
+ "grad_norm": 0.029052734375,
+ "learning_rate": 7.701677258179996e-05,
+ "loss": 0.9018702697753906,
+ "mean_token_accuracy": 0.78552112005651,
+ "num_tokens": 12330073.0,
+ "step": 1050
+ },
+ {
+ "entropy": 0.9831135825067758,
+ "epoch": 0.6015458328961634,
+ "grad_norm": 0.031005859375,
+ "learning_rate": 7.263370099279172e-05,
+ "loss": 0.9220442199707031,
+ "mean_token_accuracy": 0.7789029122143983,
+ "num_tokens": 12623027.0,
+ "step": 1075
+ },
+ {
+ "entropy": 0.8838035979354754,
+ "epoch": 0.6155352708704928,
+ "grad_norm": 0.040283203125,
+ "learning_rate": 6.83067630683331e-05,
+ "loss": 0.8227420043945313,
+ "mean_token_accuracy": 0.8038330339640379,
+ "num_tokens": 12916473.0,
+ "step": 1100
+ },
+ {
+ "entropy": 0.883046350381337,
+ "epoch": 0.6295247088448221,
+ "grad_norm": 0.037353515625,
+ "learning_rate": 6.404483420958494e-05,
+ "loss": 0.8318240356445312,
+ "mean_token_accuracy": 0.8026394218951464,
+ "num_tokens": 13210350.0,
+ "step": 1125
+ },
+ {
+ "entropy": 0.9277670627878979,
+ "epoch": 0.6435141468191515,
+ "grad_norm": 0.02783203125,
+ "learning_rate": 5.9856656471321636e-05,
+ "loss": 0.8780873870849609,
+ "mean_token_accuracy": 0.7957132039964199,
+ "num_tokens": 13501483.0,
+ "step": 1150
+ },
+ {
+ "entropy": 0.9232714768499136,
+ "epoch": 0.6575035847934809,
+ "grad_norm": 0.0255126953125,
+ "learning_rate": 5.575082063025619e-05,
+ "loss": 0.8550299835205079,
+ "mean_token_accuracy": 0.7936624947935342,
+ "num_tokens": 13797592.0,
+ "step": 1175
+ },
+ {
+ "entropy": 0.899801154313609,
+ "epoch": 0.6714930227678103,
+ "grad_norm": 0.03369140625,
+ "learning_rate": 5.173574856366683e-05,
+ "loss": 0.8404230499267578,
+ "mean_token_accuracy": 0.7989044986665249,
+ "num_tokens": 14085126.0,
+ "step": 1200
+ },
+ {
+ "entropy": 0.8731577204307541,
+ "epoch": 0.6854824607421397,
+ "grad_norm": 0.033203125,
+ "learning_rate": 4.7819675974469356e-05,
+ "loss": 0.8161344146728515,
+ "mean_token_accuracy": 0.8059349462389946,
+ "num_tokens": 14383100.0,
+ "step": 1225
+ },
+ {
+ "entropy": 0.9630948188621551,
+ "epoch": 0.6994718987164691,
+ "grad_norm": 0.033935546875,
+ "learning_rate": 4.401063549816984e-05,
+ "loss": 0.91357177734375,
+ "mean_token_accuracy": 0.7841827914863825,
+ "num_tokens": 14682819.0,
+ "step": 1250
+ },
+ {
+ "entropy": 0.9606740972073748,
+ "epoch": 0.7134613366907985,
+ "grad_norm": 0.02783203125,
+ "learning_rate": 4.031644022634831e-05,
+ "loss": 0.896214599609375,
+ "mean_token_accuracy": 0.7852722837030888,
+ "num_tokens": 14972956.0,
+ "step": 1275
+ },
+ {
+ "entropy": 0.9392211610358209,
+ "epoch": 0.7274507746651279,
+ "grad_norm": 0.033447265625,
+ "learning_rate": 3.674466768047078e-05,
+ "loss": 0.8826549530029297,
+ "mean_token_accuracy": 0.7912480696290731,
+ "num_tokens": 15271172.0,
+ "step": 1300
+ },
+ {
+ "entropy": 0.9369741760706529,
+ "epoch": 0.7414402126394573,
+ "grad_norm": 0.0228271484375,
+ "learning_rate": 3.330264426890114e-05,
+ "loss": 0.8716287994384766,
+ "mean_token_accuracy": 0.7906265539675951,
+ "num_tokens": 15567414.0,
+ "step": 1325
+ },
+ {
+ "entropy": 0.8475008888402954,
+ "epoch": 0.7554296506137866,
+ "grad_norm": 0.02734375,
+ "learning_rate": 2.9997430258996208e-05,
+ "loss": 0.7837786865234375,
+ "mean_token_accuracy": 0.8113828019797802,
+ "num_tokens": 15862493.0,
+ "step": 1350
+ },
+ {
+ "entropy": 0.8544112277310342,
+ "epoch": 0.7694190885881159,
+ "grad_norm": 0.0400390625,
+ "learning_rate": 2.6835805295107897e-05,
+ "loss": 0.7910999298095703,
+ "mean_token_accuracy": 0.8121292810887099,
+ "num_tokens": 16154298.0,
+ "step": 1375
+ },
+ {
+ "entropy": 0.8836554439552128,
+ "epoch": 0.7834085265624453,
+ "grad_norm": 0.03271484375,
+ "learning_rate": 2.382425449219854e-05,
+ "loss": 0.8322444152832031,
+ "mean_token_accuracy": 0.8058089179545641,
+ "num_tokens": 16450244.0,
+ "step": 1400
+ },
+ {
+ "entropy": 0.9750821837875993,
+ "epoch": 0.7973979645367747,
+ "grad_norm": 0.0279541015625,
+ "learning_rate": 2.0968955133593805e-05,
+ "loss": 0.9110565948486328,
+ "mean_token_accuracy": 0.7827954424917698,
+ "num_tokens": 16742734.0,
+ "step": 1425
+ },
+ {
+ "entropy": 0.8045650019776076,
+ "epoch": 0.8113874025111041,
+ "grad_norm": 0.037841796875,
+ "learning_rate": 1.8275764000159222e-05,
+ "loss": 0.7386186218261719,
+ "mean_token_accuracy": 0.8229204141348601,
+ "num_tokens": 17040960.0,
+ "step": 1450
+ },
+ {
+ "entropy": 0.9300361970160157,
+ "epoch": 0.8253768404854335,
+ "grad_norm": 0.0303955078125,
+ "learning_rate": 1.5750205356889937e-05,
+ "loss": 0.8793846130371094,
+ "mean_token_accuracy": 0.7934108532965183,
+ "num_tokens": 17340398.0,
+ "step": 1475
+ },
+ {
+ "entropy": 0.9426807796442881,
+ "epoch": 0.8393662784597629,
+ "grad_norm": 0.032958984375,
+ "learning_rate": 1.339745962155613e-05,
+ "loss": 0.8905538940429687,
+ "mean_token_accuracy": 0.7892987384647131,
+ "num_tokens": 17629025.0,
+ "step": 1500
+ },
+ {
+ "entropy": 0.8541876428946853,
+ "epoch": 0.8533557164340922,
+ "grad_norm": 0.02587890625,
+ "learning_rate": 1.1222352738646825e-05,
+ "loss": 0.7829645538330078,
+ "mean_token_accuracy": 0.8128130162507295,
+ "num_tokens": 17923764.0,
+ "step": 1525
+ },
+ {
+ "entropy": 0.9169456198904663,
+ "epoch": 0.8673451544084216,
+ "grad_norm": 0.0238037109375,
+ "learning_rate": 9.229346280407925e-06,
+ "loss": 0.8602067565917969,
+ "mean_token_accuracy": 0.7955809989571572,
+ "num_tokens": 18210238.0,
+ "step": 1550
+ },
+ {
+ "entropy": 0.8634558629477397,
+ "epoch": 0.881334592382751,
+ "grad_norm": 0.028564453125,
+ "learning_rate": 7.422528295279685e-06,
+ "loss": 0.8013089752197265,
+ "mean_token_accuracy": 0.8093983814865351,
+ "num_tokens": 18507153.0,
+ "step": 1575
+ },
+ {
+ "entropy": 0.9312617677496746,
+ "epoch": 0.8953240303570804,
+ "grad_norm": 0.037109375,
+ "learning_rate": 5.805604922504859e-06,
+ "loss": 0.8762725830078125,
+ "mean_token_accuracy": 0.791949690952897,
+ "num_tokens": 18804977.0,
+ "step": 1600
+ },
+ {
+ "entropy": 0.9295520845707506,
+ "epoch": 0.9093134683314098,
+ "grad_norm": 0.029541015625,
+ "learning_rate": 4.381892790107811e-06,
+ "loss": 0.8709014129638671,
+ "mean_token_accuracy": 0.7921189369261264,
+ "num_tokens": 19097011.0,
+ "step": 1625
+ },
+ {
+ "entropy": 0.9084588387049735,
+ "epoch": 0.9233029063057392,
+ "grad_norm": 0.036376953125,
+ "learning_rate": 3.154312211837673e-06,
+ "loss": 0.8461515045166016,
+ "mean_token_accuracy": 0.7987799559533596,
+ "num_tokens": 19394487.0,
+ "step": 1650
+ },
+ {
+ "entropy": 0.9174220200441777,
+ "epoch": 0.9372923442800686,
+ "grad_norm": 0.029541015625,
+ "learning_rate": 2.125381197030374e-06,
+ "loss": 0.8725605010986328,
+ "mean_token_accuracy": 0.7959611015766859,
+ "num_tokens": 19686426.0,
+ "step": 1675
+ },
+ {
+ "entropy": 0.9678884049877524,
+ "epoch": 0.951281782254398,
+ "grad_norm": 0.0264892578125,
+ "learning_rate": 1.297210285675754e-06,
+ "loss": 0.9031341552734375,
+ "mean_token_accuracy": 0.785126566067338,
+ "num_tokens": 19976869.0,
+ "step": 1700
+ },
+ {
+ "entropy": 0.9488437540875748,
+ "epoch": 0.9652712202287274,
+ "grad_norm": 0.03515625,
+ "learning_rate": 6.71498219284894e-07,
+ "loss": 0.8841629028320312,
+ "mean_token_accuracy": 0.7887707254290581,
+ "num_tokens": 20267550.0,
+ "step": 1725
+ },
+ {
+ "entropy": 0.9077194698620588,
+ "epoch": 0.9792606582030567,
+ "grad_norm": 0.037109375,
+ "learning_rate": 2.4952845643689827e-07,
+ "loss": 0.830882797241211,
+ "mean_token_accuracy": 0.7997597184032201,
+ "num_tokens": 20562898.0,
+ "step": 1750
+ },
+ {
+ "entropy": 0.8242502619419247,
+ "epoch": 0.993250096177386,
+ "grad_norm": 0.03076171875,
+ "learning_rate": 3.216654015283371e-08,
+ "loss": 0.7700749969482422,
+ "mean_token_accuracy": 0.8184793063253164,
+ "num_tokens": 20856654.0,
+ "step": 1775
+ }
+ ],
+ "logging_steps": 25,
+ "max_steps": 1788,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 8.960165674320323e+17,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
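
The log above records a single epoch over roughly 20.9M tokens, with training loss falling from about 1.95 at step 25 to about 0.77 at step 1775 and mean token accuracy rising from about 0.57 to 0.82. A minimal sketch for inspecting this curve, assuming the checkpoint directory has been downloaded locally (the path below is illustrative):

```python
import json

# Load the trainer state saved at the final checkpoint (path is an assumption;
# adjust to wherever the repo was downloaded).
with open("checkpoint-1788/trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry carries the fields shown above: step, loss,
# mean_token_accuracy, learning_rate, entropy, num_tokens.
for entry in state["log_history"]:
    print(
        f"step {entry['step']:>4}  "
        f"loss {entry['loss']:.4f}  "
        f"acc {entry['mean_token_accuracy']:.4f}  "
        f"lr {entry['learning_rate']:.2e}"
    )
```
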
checkpoint-1788/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b0ea67ee3810c6e10021bd35d99633fb742a8db898d131b220e180054e0dcd7
+ size 5649
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
+ size 11421892
tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "add_prefix_space": false,
+ "backend": "tokenizers",
+ "bos_token": null,
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "is_local": false,
+ "model_max_length": 131072,
+ "pad_token": "<|im_end|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
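
Note that this config sets `bos_token` to null and reuses the end-of-turn token `<|im_end|>` as both `eos_token` and `pad_token`, so batched generation can pad with the eos id. A minimal usage sketch, assuming `transformers` is installed and this repository has been cloned locally (the directory name below is illustrative):

```python
from transformers import AutoTokenizer

# Load the tokenizer shipped alongside the adapter (local path is an
# assumption; a Hub repo id would also work here).
tok = AutoTokenizer.from_pretrained("./Wikipedia-Networking-Qwen2.5-7B")

# Per tokenizer_config.json above, padding reuses the end-of-turn token.
print(tok.eos_token, tok.pad_token)  # expected: <|im_end|> <|im_end|>
```
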
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b0ea67ee3810c6e10021bd35d99633fb742a8db898d131b220e180054e0dcd7
+ size 5649
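
The `tokenizer.json` and `training_args.bin` entries above are git-LFS pointer files, not the payloads themselves; the actual objects are identified by the `oid sha256` and `size` fields (note the two `training_args.bin` pointers share one oid, i.e. they are identical files). A sketch for verifying a fetched object against its pointer, assuming the real file has been downloaded via `git lfs` or `huggingface_hub`:

```python
import hashlib

# Expected digest, copied from the LFS pointer above.
expected = "8b0ea67ee3810c6e10021bd35d99633fb742a8db898d131b220e180054e0dcd7"

# Hash the downloaded file in chunks (filename assumes the resolved
# training_args.bin, not the 3-line pointer file).
h = hashlib.sha256()
with open("training_args.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected, "checksum mismatch"
print("training_args.bin verified (pointer says 5649 bytes)")
```
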