thetmon committed
Commit de6b0bf · verified · 1 Parent(s): c836753

Upload LoRA adapter (README written by author)

README.md CHANGED
@@ -1,46 +1,42 @@
 ---
 base_model: Qwen/Qwen3-4B-Instruct-2507
 datasets:
-- u-10bei/sft_alfworld_trajectory_dataset_v5
-- u-10bei/dbbench_sft_dataset_react_v4
+- u-10bei/structured_data_with_cot_dataset
 language:
 - en
 license: apache-2.0
 library_name: peft
 pipeline_tag: text-generation
 tags:
+- qlora
 - lora
-- agent
-- tool-use
-- alfworld
-- dbbench
+- structured-output
 ---
 
-# Qwen3-4B ALFWorld+DBBench Mixed LoRA Adapter (r=16)
+# Qwen3-4B Structured-Output QLoRA Adapter (r=64, clean CoT)
 
 This repository provides a **LoRA adapter** fine-tuned from
-**Qwen/Qwen3-4B-Instruct-2507** using **LoRA + Unsloth**.
+**Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**.
 
 This repository contains **LoRA adapter weights only**.
 The base model must be loaded separately.
 
 ## Training Objective
 
-This adapter is trained to improve **multi-turn agent task performance**
-on ALFWorld (household tasks) and DBBench (database operations).
+This adapter is trained to improve **structured output accuracy**
+(JSON / YAML / XML / TOML / CSV).
 
-Loss is applied to **all assistant turns** in the multi-turn trajectory,
-enabling the model to learn environment observation, action selection,
-tool use, and recovery from errors.
+Loss is applied only to the final assistant output,
+while intermediate reasoning (Chain-of-Thought) is masked.
 
 ## Training Configuration
 
 - Base model: Qwen/Qwen3-4B-Instruct-2507
-- Method: LoRA (full precision base)
+- Method: QLoRA (4-bit)
 - Max sequence length: 2048
-- Epochs: 2
-- Learning rate: 2e-05
-- LoRA: r=16, alpha=32
+- Epochs: 3
+- Learning rate: 2e-06
+- LoRA: r=64, alpha=128
 
 ## Usage
 
@@ -54,16 +50,14 @@ adapter = "your_id/your-repo"
 
 tokenizer = AutoTokenizer.from_pretrained(base)
 model = AutoModelForCausalLM.from_pretrained(
-    base,
-    torch_dtype=torch.float16,
-    device_map="auto",
+    base, torch_dtype=torch.float16, device_map="auto",
 )
 model = PeftModel.from_pretrained(model, adapter)
 ```
 
 ## Sources & Terms (IMPORTANT)
 
-Training data: u-10bei/sft_alfworld_trajectory_dataset_v5 + u-10bei/dbbench_sft_dataset_react_v4
+Training data: u-10bei/structured_data_with_cot_dataset
 
-Dataset License: MIT License. This dataset is used and distributed under the terms of the MIT License.
-Compliance: Users must comply with the MIT license (including copyright notice) and the base model's original terms of use.
+Dataset License: MIT License.
+Compliance: Users must comply with the MIT license and the base model's original terms of use.
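The updated README only hints at how the "clean CoT" loss masking works. Here is a minimal sketch of that scheme, assuming the standard causal-LM labels convention (`-100` is ignored by cross-entropy); the answer-boundary handling is illustrative, since the author's preprocessing code is not part of this commit:

```python
IGNORE_INDEX = -100  # positions with this label are skipped by the CE loss

def mask_cot_labels(input_ids: list[int], answer_start: int) -> list[int]:
    """Copy input_ids into labels, ignoring everything before the final
    structured answer so loss covers only that answer (not the CoT)."""
    return [IGNORE_INDEX] * answer_start + input_ids[answer_start:]
```

The hunk above also shows only the tail of the README's usage snippet. A complete version might look like the following sketch; `your_id/your-repo` is the README's own placeholder for the adapter repo ID:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = "Qwen/Qwen3-4B-Instruct-2507"
adapter = "your_id/your-repo"  # placeholder from the README

tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(
    base, torch_dtype=torch.float16, device_map="auto",
)
model = PeftModel.from_pretrained(model, adapter)

# Quick smoke test on the task this adapter targets: structured output.
messages = [{"role": "user", "content": 'Reply with {"status": "ok"} as JSON.'}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
out = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```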
adapter_config.json CHANGED
@@ -1,12 +1,18 @@
 {
+  "alora_invocation_tokens": null,
   "alpha_pattern": {},
+  "arrow_config": null,
   "auto_mapping": {
     "base_model_class": "Qwen3ForCausalLM",
     "parent_library": "transformers.models.qwen3.modeling_qwen3",
     "unsloth_fixed": true
   },
-  "base_model_name_or_path": "unsloth/Qwen3-4B-Instruct-2507",
+  "base_model_name_or_path": "unsloth/qwen3-4b-instruct-2507-unsloth-bnb-4bit",
   "bias": "none",
+  "corda_config": null,
+  "ensure_weight_tying": false,
+  "eva_config": null,
+  "exclude_modules": null,
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
@@ -14,25 +20,31 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 32,
-  "lora_dropout": 0.0,
+  "lora_alpha": 128,
+  "lora_bias": false,
+  "lora_dropout": 0.04,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 16,
+  "peft_version": "0.18.1",
+  "qalora_group_size": 16,
+  "r": 64,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
-    "o_proj",
-    "gate_proj",
     "down_proj",
     "q_proj",
+    "k_proj",
+    "v_proj",
+    "gate_proj",
     "up_proj",
-    "k_proj"
+    "o_proj"
   ],
+  "target_parameters": null,
   "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
   "use_dora": false,
+  "use_qalora": false,
   "use_rslora": false
 }
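For readers reproducing the new adapter shape, the updated config corresponds roughly to the following `peft` `LoraConfig` (a sketch, not the author's training script). Note that the scaling ratio alpha/r stays at 2.0 (128/64 versus the old 32/16), and the target-module set is unchanged, only reordered in the JSON:

```python
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,               # was 16
    lora_alpha=128,     # was 32; alpha/r stays at 2.0
    lora_dropout=0.04,  # was 0.0
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[    # same seven projections as the old adapter
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```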
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4eb9831d0196d20a3f92f727b3cdb2371f030c5800005f460fb683783ccc2253
-size 132187888
+oid sha256:39b86160b193273b97f6ece7c2459fdcc5a0ac434fdc6583bf6a652b800ecfba
+size 528550256
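The roughly 4x jump in adapter size (132 MB to 529 MB) is consistent with the rank change alone: r goes from 16 to 64 over the same seven target modules, quadrupling the LoRA parameter count.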
tokenizer_config.json CHANGED
@@ -233,7 +233,7 @@
   "extra_special_tokens": {},
   "model_max_length": 262144,
   "pad_token": "<|vision_pad|>",
-  "padding_side": "left",
+  "padding_side": "right",
   "split_special_tokens": false,
   "tokenizer_class": "Qwen2Tokenizer",
   "unk_token": null