Jerry999 committed
Commit 7280ef9 · verified · 1 Parent(s): 02dfe15

Upload checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305

Files changed (35)
  1. .gitattributes +3 -0
  2. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/adapter_config.json +46 -0
  3. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/adapter_model.safetensors +3 -0
  4. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/chat_template.jinja +4 -0
  5. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/optimizer.pt +3 -0
  6. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/rng_state.pth +3 -0
  7. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/scheduler.pt +3 -0
  8. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/tokenizer.json +3 -0
  9. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/tokenizer_config.json +29 -0
  10. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/tokens_state. +1 -0
  11. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/trainer_state.json +0 -0
  12. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/training_args.bin +3 -0
  13. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/adapter_config.json +46 -0
  14. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/adapter_model.safetensors +3 -0
  15. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/chat_template.jinja +4 -0
  16. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/optimizer.pt +3 -0
  17. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/rng_state.pth +3 -0
  18. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/scheduler.pt +3 -0
  19. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/tokenizer.json +3 -0
  20. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/tokenizer_config.json +29 -0
  21. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/tokens_state. +1 -0
  22. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/trainer_state.json +0 -0
  23. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/training_args.bin +3 -0
  24. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/adapter_config.json +46 -0
  25. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/adapter_model.safetensors +3 -0
  26. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/chat_template.jinja +4 -0
  27. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/optimizer.pt +3 -0
  28. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/rng_state.pth +3 -0
  29. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/scheduler.pt +3 -0
  30. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/tokenizer.json +3 -0
  31. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/tokenizer_config.json +29 -0
  32. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/tokens_state. +1 -0
  33. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/trainer_state.json +0 -0
  34. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/training_args.bin +3 -0
  35. checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/debug.log +0 -0
.gitattributes CHANGED
@@ -67,3 +67,6 @@ checkpoints/math_operations/lora_sft_primitive_atomic_50k/checkpoint-1031/tokeni
  checkpoints/math_operations/lora_sft_primitive_atomic_50k/checkpoint-2062/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  checkpoints/math_operations/lora_sft_primitive_atomic_50k/checkpoint-3093/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  checkpoints/math_operations/lora_sft_primitive_atomic_50k/checkpoint-4124/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/tokenizer.json filter=lfs diff=lfs merge=lfs -text
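Each of these `filter=lfs diff=lfs merge=lfs -text` rules routes one of the newly added `tokenizer.json` files (about 11 MB apiece) through Git LFS, so the repository versions a small pointer stub instead of the blob itself. Below is a minimal Python sketch of how one might sanity-check that every tokenizer file in the new run directory has a matching rule before committing; the `repo_root` value is a placeholder, and the exact-path matching mirrors how these particular rules are written (full paths rather than glob patterns):

```python
from pathlib import Path

repo_root = Path(".")  # placeholder: run from the repository root

# Collect the exact paths that .gitattributes routes through Git LFS.
lfs_tracked = set()
for line in (repo_root / ".gitattributes").read_text().splitlines():
    if "filter=lfs" in line:
        lfs_tracked.add(line.split()[0])

# Every tokenizer.json under the new run directory should have a rule.
run_dir = "checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305"
for path in sorted(repo_root.glob(f"{run_dir}/checkpoint-*/tokenizer.json")):
    rel = path.as_posix()
    print(f"{rel}: {'tracked' if rel in lfs_tracked else 'MISSING LFS RULE'}")
```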
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": null,
+   "base_model_name_or_path": "/home/jiaruil5/math_rl/mix_teachers/r3lit_rl/models/Qwen/Qwen3-4B-Instruct-2507",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.18.1",
+   "qalora_group_size": 16,
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "o_proj",
+     "up_proj",
+     "q_proj",
+     "v_proj",
+     "k_proj"
+   ],
+   "target_parameters": [],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
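The config describes a rank-32 LoRA (alpha 64, dropout 0.05) applied to all seven attention and MLP projections of Qwen3-4B-Instruct-2507, saved with `inference_mode: true`, so it loads frozen by default. Note that `base_model_name_or_path` records a local path from the training machine; anyone loading the adapter elsewhere would substitute the public checkpoint. A minimal sketch with `peft` and `transformers` (the Hub ID for the base model is an assumption inferred from the path):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed public equivalent of the local base-model path in adapter_config.json.
base_id = "Qwen/Qwen3-4B-Instruct-2507"
adapter_dir = "checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090"

tokenizer = AutoTokenizer.from_pretrained(adapter_dir)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, adapter_dir)  # applies the rank-32 adapter
model.eval()
# model = model.merge_and_unload()  # optionally fold the LoRA into the base weights
```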
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3dc472ce32f1b25c4d50f00b08d46701ba18b865750ab1ce3ae8b0b340938fe3
+ size 264308896
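As with all the large files in this commit, only a Git LFS pointer is versioned here: three `key value` lines giving the spec URL, the SHA-256 of the real 264 MB adapter, and its byte size. A self-contained sketch of parsing a pointer and verifying a downloaded blob against it (paths are placeholders):

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    """Split the 'key value' lines of a Git LFS pointer file."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"])}

def verify_blob(blob_path: Path, pointer_text: str) -> bool:
    """Check a downloaded blob's size and SHA-256 against its pointer."""
    expected = parse_lfs_pointer(pointer_text)
    data = blob_path.read_bytes()
    return (len(data) == expected["size"]
            and hashlib.sha256(data).hexdigest() == expected["oid"])
```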
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/chat_template.jinja ADDED
@@ -0,0 +1,4 @@
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '
+ ' + message['content'] + '<|im_end|>' + '
+ '}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+ ' }}{% endif %}
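The template is the ChatML-style Qwen format: each message is wrapped as `<|im_start|>role\ncontent<|im_end|>\n`, with a trailing `<|im_start|>assistant\n` appended when `add_generation_prompt` is set. Rather than rendering the Jinja by hand, `transformers` applies it through the tokenizer; a short sketch (the prompt is hypothetical):

```python
from transformers import AutoTokenizer

adapter_dir = "checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090"
tokenizer = AutoTokenizer.from_pretrained(adapter_dir)

messages = [{"role": "user", "content": "What is 17 * 24?"}]  # hypothetical
# add_generation_prompt=True emits the trailing '<|im_start|>assistant\n'
# so the model continues in the assistant role.
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True,
                                       tokenize=False)
print(prompt)
```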
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ed344cf64c3d10e1c736fbeaa7ac98e55fa3be78ae03ade850d8c5157d6e112
+ size 528915403
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afc9973358d547e46b848631196f318dbea76ea73815db979f5cd0c0bc2c3255
+ size 14645
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29ee7c00d322f1ff1c83de02c03f989767004bd93b36afff519cb182231ddb1e
+ size 1465
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be75606093db2094d7cd20f3c2f385c212750648bd6ea4fb2bf507a6a4c55506
+ size 11422650
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "is_local": true,
+   "model_max_length": 1010000,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
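Two settings here matter most at generation time: the EOS token is `<|im_end|>` (the ChatML turn terminator) while the pad token is the distinct `<|endoftext|>`, so batched generation can stop on end-of-turn without confusing padding for content. A quick check, assuming the checkpoint directory above:

```python
from transformers import AutoTokenizer

adapter_dir = "checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090"
tokenizer = AutoTokenizer.from_pretrained(adapter_dir)

# Distinct stop and pad tokens, per tokenizer_config.json.
print(tokenizer.eos_token)         # <|im_end|>
print(tokenizer.pad_token)         # <|endoftext|>
print(tokenizer.model_max_length)  # 1010000
```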
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/tokens_state. ADDED
@@ -0,0 +1 @@
+ {"total": 50626560, "trainable": 16026800}
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-3090/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81f6c07c026eab6a492ebb3308ab20a3d2eadf85049e0ba07703b8660a0fb71f
+ size 7121
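`training_args.bin` is the pickled `TrainingArguments` object that the Hugging Face `Trainer` saves with each checkpoint; the identical hash and size across all three checkpoints in this commit indicate a single fixed run configuration. A sketch of inspecting it (recent PyTorch defaults `torch.load` to `weights_only=True`, which must be relaxed for pickled objects, so only do this with files you trust; `transformers` must be importable for unpickling to succeed):

```python
import torch

path = ("checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/"
        "checkpoint-3090/training_args.bin")
args = torch.load(path, weights_only=False)  # trusted source only
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```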
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": null,
+   "base_model_name_or_path": "/home/jiaruil5/math_rl/mix_teachers/r3lit_rl/models/Qwen/Qwen3-4B-Instruct-2507",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.18.1",
+   "qalora_group_size": 16,
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "o_proj",
+     "up_proj",
+     "q_proj",
+     "v_proj",
+     "k_proj"
+   ],
+   "target_parameters": [],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13a7b1c0691148654c87d0c4299d8844446d21af921eeaba4f468cff64db273f
+ size 264308896
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/chat_template.jinja ADDED
@@ -0,0 +1,4 @@
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '
+ ' + message['content'] + '<|im_end|>' + '
+ '}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+ ' }}{% endif %}
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d83d55f5c030b2926c1058e4690599df5289b2d73f38a42e819af9c76070b8c7
+ size 528915403
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74c7330436b6bcb55e212921308afc74f209ae0195a22e6e9b4e80ae2959d989
+ size 14645
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:574960cb8125cd1ffe3dcc2de362ecb3b211d71baa812f38ece4e3e4834460d8
+ size 1465
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be75606093db2094d7cd20f3c2f385c212750648bd6ea4fb2bf507a6a4c55506
+ size 11422650
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "is_local": true,
+   "model_max_length": 1010000,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/tokens_state. ADDED
@@ -0,0 +1 @@
+ {"total": 67510272, "trainable": 21371364}
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-4120/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81f6c07c026eab6a492ebb3308ab20a3d2eadf85049e0ba07703b8660a0fb71f
+ size 7121
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": null,
+   "base_model_name_or_path": "/home/jiaruil5/math_rl/mix_teachers/r3lit_rl/models/Qwen/Qwen3-4B-Instruct-2507",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.18.1",
+   "qalora_group_size": 16,
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "o_proj",
+     "up_proj",
+     "q_proj",
+     "v_proj",
+     "k_proj"
+   ],
+   "target_parameters": [],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f049b07b8eae6a4afb1b21ebae1f8ff9c4f4faa70b18d77de1c9968eadae396
+ size 264308896
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/chat_template.jinja ADDED
@@ -0,0 +1,4 @@
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '
+ ' + message['content'] + '<|im_end|>' + '
+ '}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+ ' }}{% endif %}
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16ade70a64308b50b793c979a20a52963a8e5ab59a55a749a974c3348d2b9840
+ size 528915403
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2d0025e6262ea687da7129ca29ad07d6c942e957a8ad1555e972661fe0e7f1d
+ size 14645
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1645687525ebb96dbe9d65dc86517ece96d332667e6dc6db1b2ad055db8d1167
+ size 1465
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be75606093db2094d7cd20f3c2f385c212750648bd6ea4fb2bf507a6a4c55506
+ size 11422650
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "is_local": true,
+   "model_max_length": 1010000,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/tokens_state. ADDED
@@ -0,0 +1 @@
+ {"total": 84385792, "trainable": 26712160}
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/checkpoint-5150/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81f6c07c026eab6a492ebb3308ab20a3d2eadf85049e0ba07703b8660a0fb71f
+ size 7121
checkpoints/math_operations/lora_sft_primitive_atomic_50k_t20260305/debug.log ADDED
The diff for this file is too large to render. See raw diff