feat: consolidate all condition adapters into hub repo subdirectories (#4)
- feat: consolidate all condition adapters into hub repo subdirectories (e669ee8fcee93de9ec766706d595e4d2a8739cb1)
- condition-1-en-5k/adapter_config.json +50 -0
- condition-1-en-5k/adapter_model.safetensors +3 -0
- condition-1-en-5k/training_metrics.json +207 -0
- condition-2-es-5k/adapter_config.json +50 -0
- condition-2-es-5k/adapter_model.safetensors +3 -0
- condition-2-es-5k/training_metrics.json +207 -0
- condition-2-ur-5k/adapter_config.json +50 -0
- condition-2-ur-5k/adapter_model.safetensors +3 -0
- condition-2-zh-5k/adapter_config.json +50 -0
- condition-2-zh-5k/adapter_model.safetensors +3 -0
- condition-2-zh-5k/training_metrics.json +207 -0
- condition-3-zh-5k/adapter_config.json +50 -0
- condition-3-zh-5k/adapter_model.safetensors +3 -0
- condition-3-zh-5k/training_metrics.json +207 -0
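Since every condition adapter now lives in a subdirectory of one hub repo rather than in its own repo, callers select a condition by pointing PEFT at the matching subfolder. The sketch below is illustrative only: the consolidated repo id is not part of this diff, so "your-org/condition-adapters" is a placeholder, and peft/transformers plus subfolder forwarding to the hub download are assumed.

    # Minimal sketch: load one condition's adapter from its subdirectory.
    # HUB_REPO is hypothetical; substitute the repo this commit was pushed to.
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import PeftModel

    BASE = "CohereLabs/tiny-aya-base"          # base model named in adapter_config.json
    HUB_REPO = "your-org/condition-adapters"   # placeholder repo id (assumption)
    CONDITION = "condition-1-en-5k"            # any subdirectory added in this commit

    tokenizer = AutoTokenizer.from_pretrained(BASE)
    base_model = AutoModelForCausalLM.from_pretrained(BASE)

    # PEFT reads adapter_config.json and adapter_model.safetensors from the subfolder.
    model = PeftModel.from_pretrained(base_model, HUB_REPO, subfolder=CONDITION)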
condition-1-en-5k/adapter_config.json
ADDED
@@ -0,0 +1,50 @@
+{
+  "alora_invocation_tokens": null,
+  "alpha_pattern": {},
+  "arrow_config": null,
+  "auto_mapping": {
+    "base_model_class": "Cohere2ForCausalLM",
+    "parent_library": "transformers.models.cohere2.modeling_cohere2",
+    "unsloth_fixed": true
+  },
+  "base_model_name_or_path": "CohereLabs/tiny-aya-base",
+  "bias": "none",
+  "corda_config": null,
+  "ensure_weight_tying": false,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "peft_version": "0.18.1",
+  "qalora_group_size": 16,
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "q_proj",
+    "o_proj",
+    "up_proj",
+    "v_proj",
+    "gate_proj",
+    "down_proj"
+  ],
+  "target_parameters": null,
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}
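For reference, the config above is a plain rank-16 LoRA over all attention and MLP projections of the base model; the five condition configs in this commit differ only in the listing order of target_modules. A minimal sketch of an equivalent peft.LoraConfig, with argument values taken from the file and peft 0.18.x assumed (matching "peft_version"):

    from peft import LoraConfig

    lora_config = LoraConfig(
        r=16,                   # "r": 16
        lora_alpha=32,          # "lora_alpha": 32
        lora_dropout=0.0,       # "lora_dropout": 0.0
        bias="none",            # "bias": "none"
        task_type="CAUSAL_LM",  # "task_type": "CAUSAL_LM"
        target_modules=[
            "q_proj", "k_proj", "v_proj", "o_proj",
            "gate_proj", "up_proj", "down_proj",
        ],
        use_rslora=False,
        use_dora=False,
    )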
condition-1-en-5k/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56d7139396d7a97a9b9e765f0d7c5e03ed114d88c52afbc648c4b87f9fd25832
+size 120981200
condition-1-en-5k/training_metrics.json
ADDED
@@ -0,0 +1,207 @@
+[
+  {
+    "loss": 1.0164,
+    "grad_norm": 0.039307065308094025,
+    "learning_rate": 0.00012,
+    "epoch": 0.03546099290780142,
+    "step": 10
+  },
+  {
+    "loss": 1.1027,
+    "grad_norm": 0.03674926236271858,
+    "learning_rate": 0.00019988926445681492,
+    "epoch": 0.07092198581560284,
+    "step": 20
+  },
+  {
+    "loss": 1.1062,
+    "grad_norm": 0.03771359100937843,
+    "learning_rate": 0.0001986463043917528,
+    "epoch": 0.10638297872340426,
+    "step": 30
+  },
+  {
+    "loss": 1.0428,
+    "grad_norm": 0.03601245582103729,
+    "learning_rate": 0.00019603921063437793,
+    "epoch": 0.14184397163120568,
+    "step": 40
+  },
+  {
+    "loss": 1.0116,
+    "grad_norm": 0.04361054301261902,
+    "learning_rate": 0.0001921040354671897,
+    "epoch": 0.1773049645390071,
+    "step": 50
+  },
+  {
+    "loss": 1.0443,
+    "grad_norm": 0.04239841178059578,
+    "learning_rate": 0.00018689519659051467,
+    "epoch": 0.2127659574468085,
+    "step": 60
+  },
+  {
+    "loss": 1.0685,
+    "grad_norm": 0.03858834505081177,
+    "learning_rate": 0.00018048472460553257,
+    "epoch": 0.24822695035460993,
+    "step": 70
+  },
+  {
+    "loss": 1.0954,
+    "grad_norm": 0.058491941541433334,
+    "learning_rate": 0.00017296126693671884,
+    "epoch": 0.28368794326241137,
+    "step": 80
+  },
+  {
+    "loss": 1.0308,
+    "grad_norm": 0.04502752423286438,
+    "learning_rate": 0.00016442886196799464,
+    "epoch": 0.3191489361702128,
+    "step": 90
+  },
+  {
+    "loss": 1.0046,
+    "grad_norm": 0.03750888630747795,
+    "learning_rate": 0.00015500550034448413,
+    "epoch": 0.3546099290780142,
+    "step": 100
+  },
+  {
+    "loss": 1.0566,
+    "grad_norm": 0.04296468198299408,
+    "learning_rate": 0.00014482149333496454,
+    "epoch": 0.3900709219858156,
+    "step": 110
+  },
+  {
+    "loss": 1.0718,
+    "grad_norm": 0.04349366948008537,
+    "learning_rate": 0.0001340176708181637,
+    "epoch": 0.425531914893617,
+    "step": 120
+  },
+  {
+    "loss": 1.0893,
+    "grad_norm": 0.04348432272672653,
+    "learning_rate": 0.00012274343381211066,
+    "epoch": 0.46099290780141844,
+    "step": 130
+  },
+  {
+    "loss": 1.0306,
+    "grad_norm": 0.039543844759464264,
+    "learning_rate": 0.00011115468847720245,
+    "epoch": 0.49645390070921985,
+    "step": 140
+  },
+  {
+    "loss": 1.0367,
+    "grad_norm": 0.03936546668410301,
+    "learning_rate": 9.941169016269379e-05,
+    "epoch": 0.5319148936170213,
+    "step": 150
+  },
+  {
+    "loss": 1.0447,
+    "grad_norm": 0.03707996383309364,
+    "learning_rate": 8.767682731028415e-05,
+    "epoch": 0.5673758865248227,
+    "step": 160
+  },
+  {
+    "loss": 1.0971,
+    "grad_norm": 0.04127993807196617,
+    "learning_rate": 7.611237586016557e-05,
+    "epoch": 0.6028368794326241,
+    "step": 170
+  },
+  {
+    "loss": 1.0177,
+    "grad_norm": 0.04585001990199089,
+    "learning_rate": 6.487825521280109e-05,
+    "epoch": 0.6382978723404256,
+    "step": 180
+  },
+  {
+    "loss": 1.0081,
+    "grad_norm": 0.04047301039099693,
+    "learning_rate": 5.4129816778190936e-05,
+    "epoch": 0.6737588652482269,
+    "step": 190
+  },
+  {
+    "loss": 1.1236,
+    "grad_norm": 0.03668837621808052,
+    "learning_rate": 4.401569569374668e-05,
+    "epoch": 0.7092198581560284,
+    "step": 200
+  },
+  {
+    "loss": 1.1207,
+    "grad_norm": 0.039949752390384674,
+    "learning_rate": 3.467575541836305e-05,
+    "epoch": 0.7446808510638298,
+    "step": 210
+  },
+  {
+    "loss": 1.0138,
+    "grad_norm": 0.03711694851517677,
+    "learning_rate": 2.6239153625937784e-05,
+    "epoch": 0.7801418439716312,
+    "step": 220
+  },
+  {
+    "loss": 1.0105,
+    "grad_norm": 0.05135485529899597,
+    "learning_rate": 1.882255614419376e-05,
+    "epoch": 0.8156028368794326,
+    "step": 230
+  },
+  {
+    "loss": 1.0054,
+    "grad_norm": 0.04557114839553833,
+    "learning_rate": 1.2528523637410838e-05,
+    "epoch": 0.851063829787234,
+    "step": 240
+  },
+  {
+    "loss": 1.1106,
+    "grad_norm": 0.04462781921029091,
+    "learning_rate": 7.4440933428779e-06,
+    "epoch": 0.8865248226950354,
+    "step": 250
+  },
+  {
+    "loss": 1.0755,
+    "grad_norm": 0.03919963911175728,
+    "learning_rate": 3.6395754735699894e-06,
+    "epoch": 0.9219858156028369,
+    "step": 260
+  },
+  {
+    "loss": 1.0508,
+    "grad_norm": 0.03482827916741371,
+    "learning_rate": 1.1675809310361497e-06,
+    "epoch": 0.9574468085106383,
+    "step": 270
+  },
+  {
+    "loss": 1.062,
+    "grad_norm": 0.03561776503920555,
+    "learning_rate": 6.229377380218005e-08,
+    "epoch": 0.9929078014184397,
+    "step": 280
+  },
+  {
+    "train_runtime": 4962.1033,
+    "train_samples_per_second": 0.907,
+    "train_steps_per_second": 0.057,
+    "total_flos": 7.86793932867502e+16,
+    "train_loss": 1.0566039296752172,
+    "epoch": 1.0,
+    "step": 282
+  }
+]
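The metrics file is a flat JSON array: one record per logged interval (every 10 optimizer steps) carrying loss, grad_norm, learning_rate, epoch, and step, plus a trailing summary record for the whole run (282 steps, one epoch). A minimal sketch for extracting the loss curve and the summary, assuming the file has been downloaded locally (the path is illustrative):

    import json

    # Illustrative local path, e.g. after cloning the hub repo.
    with open("condition-1-en-5k/training_metrics.json") as f:
        records = json.load(f)

    step_logs = [r for r in records if "loss" in r]  # per-interval log entries
    summary = records[-1]                            # final run-level summary

    steps = [r["step"] for r in step_logs]
    losses = [r["loss"] for r in step_logs]

    print(f"logged intervals: {len(step_logs)}, total steps: {summary['step']}")
    print(f"reported mean train loss: {summary['train_loss']:.4f}")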
condition-2-es-5k/adapter_config.json
ADDED
@@ -0,0 +1,50 @@
+{
+  "alora_invocation_tokens": null,
+  "alpha_pattern": {},
+  "arrow_config": null,
+  "auto_mapping": {
+    "base_model_class": "Cohere2ForCausalLM",
+    "parent_library": "transformers.models.cohere2.modeling_cohere2",
+    "unsloth_fixed": true
+  },
+  "base_model_name_or_path": "CohereLabs/tiny-aya-base",
+  "bias": "none",
+  "corda_config": null,
+  "ensure_weight_tying": false,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "peft_version": "0.18.1",
+  "qalora_group_size": 16,
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "up_proj",
+    "k_proj",
+    "q_proj",
+    "gate_proj",
+    "v_proj",
+    "o_proj",
+    "down_proj"
+  ],
+  "target_parameters": null,
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}
condition-2-es-5k/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47972c3cbce9ff16b15a915a0614f82db3108406aab465c5bfd2f4450fa3ffd6
+size 120981200
condition-2-es-5k/training_metrics.json
ADDED
@@ -0,0 +1,207 @@
+[
+  {
+    "loss": 1.289,
+    "grad_norm": 0.06134699657559395,
+    "learning_rate": 0.00012,
+    "epoch": 0.03546099290780142,
+    "step": 10
+  },
+  {
+    "loss": 1.2668,
+    "grad_norm": 0.08268137276172638,
+    "learning_rate": 0.00019988926445681492,
+    "epoch": 0.07092198581560284,
+    "step": 20
+  },
+  {
+    "loss": 1.2005,
+    "grad_norm": 0.08327318727970123,
+    "learning_rate": 0.0001986463043917528,
+    "epoch": 0.10638297872340426,
+    "step": 30
+  },
+  {
+    "loss": 1.1013,
+    "grad_norm": 0.06584063172340393,
+    "learning_rate": 0.00019603921063437793,
+    "epoch": 0.14184397163120568,
+    "step": 40
+  },
+  {
+    "loss": 1.0508,
+    "grad_norm": 0.0763179138302803,
+    "learning_rate": 0.0001921040354671897,
+    "epoch": 0.1773049645390071,
+    "step": 50
+  },
+  {
+    "loss": 1.0812,
+    "grad_norm": 0.07645361870527267,
+    "learning_rate": 0.00018689519659051467,
+    "epoch": 0.2127659574468085,
+    "step": 60
+  },
+  {
+    "loss": 1.0913,
+    "grad_norm": 0.06947488337755203,
+    "learning_rate": 0.00018048472460553257,
+    "epoch": 0.24822695035460993,
+    "step": 70
+  },
+  {
+    "loss": 1.1241,
+    "grad_norm": 0.10595150291919708,
+    "learning_rate": 0.00017296126693671884,
+    "epoch": 0.28368794326241137,
+    "step": 80
+  },
+  {
+    "loss": 1.0546,
+    "grad_norm": 0.07947733253240585,
+    "learning_rate": 0.00016442886196799464,
+    "epoch": 0.3191489361702128,
+    "step": 90
+  },
+  {
+    "loss": 1.0223,
+    "grad_norm": 0.07233906537294388,
+    "learning_rate": 0.00015500550034448413,
+    "epoch": 0.3546099290780142,
+    "step": 100
+  },
+  {
+    "loss": 1.0651,
+    "grad_norm": 0.0785745233297348,
+    "learning_rate": 0.00014482149333496454,
+    "epoch": 0.3900709219858156,
+    "step": 110
+  },
+  {
+    "loss": 1.0878,
+    "grad_norm": 0.07600134611129761,
+    "learning_rate": 0.0001340176708181637,
+    "epoch": 0.425531914893617,
+    "step": 120
+  },
+  {
+    "loss": 1.1027,
+    "grad_norm": 0.08883103728294373,
+    "learning_rate": 0.00012274343381211066,
+    "epoch": 0.46099290780141844,
+    "step": 130
+  },
+  {
+    "loss": 1.0414,
+    "grad_norm": 0.07369714975357056,
+    "learning_rate": 0.00011115468847720245,
+    "epoch": 0.49645390070921985,
+    "step": 140
+  },
+  {
+    "loss": 1.0445,
+    "grad_norm": 0.07386789470911026,
+    "learning_rate": 9.941169016269379e-05,
+    "epoch": 0.5319148936170213,
+    "step": 150
+  },
+  {
+    "loss": 1.0534,
+    "grad_norm": 0.06285598129034042,
+    "learning_rate": 8.767682731028415e-05,
+    "epoch": 0.5673758865248227,
+    "step": 160
+  },
+  {
+    "loss": 1.1014,
+    "grad_norm": 0.07704062014818192,
+    "learning_rate": 7.611237586016557e-05,
+    "epoch": 0.6028368794326241,
+    "step": 170
+  },
+  {
+    "loss": 1.0246,
+    "grad_norm": 0.0698208138346672,
+    "learning_rate": 6.487825521280109e-05,
+    "epoch": 0.6382978723404256,
+    "step": 180
+  },
+  {
+    "loss": 1.0135,
+    "grad_norm": 0.0755171999335289,
+    "learning_rate": 5.4129816778190936e-05,
+    "epoch": 0.6737588652482269,
+    "step": 190
+  },
+  {
+    "loss": 1.1256,
+    "grad_norm": 0.06397763639688492,
+    "learning_rate": 4.401569569374668e-05,
+    "epoch": 0.7092198581560284,
+    "step": 200
+  },
+  {
+    "loss": 1.1244,
+    "grad_norm": 0.07543677091598511,
+    "learning_rate": 3.467575541836305e-05,
+    "epoch": 0.7446808510638298,
+    "step": 210
+  },
+  {
+    "loss": 1.0198,
+    "grad_norm": 0.06617226451635361,
+    "learning_rate": 2.6239153625937784e-05,
+    "epoch": 0.7801418439716312,
+    "step": 220
+  },
+  {
+    "loss": 1.0186,
+    "grad_norm": 0.09004773944616318,
+    "learning_rate": 1.882255614419376e-05,
+    "epoch": 0.8156028368794326,
+    "step": 230
+  },
+  {
+    "loss": 1.0071,
+    "grad_norm": 0.07530700415372849,
+    "learning_rate": 1.2528523637410838e-05,
+    "epoch": 0.851063829787234,
+    "step": 240
+  },
+  {
+    "loss": 1.1137,
+    "grad_norm": 0.07999535650014877,
+    "learning_rate": 7.4440933428779e-06,
+    "epoch": 0.8865248226950354,
+    "step": 250
+  },
+  {
+    "loss": 1.0764,
+    "grad_norm": 0.06478076428174973,
+    "learning_rate": 3.6395754735699894e-06,
+    "epoch": 0.9219858156028369,
+    "step": 260
+  },
+  {
+    "loss": 1.0549,
+    "grad_norm": 0.05741119384765625,
+    "learning_rate": 1.1675809310361497e-06,
+    "epoch": 0.9574468085106383,
+    "step": 270
+  },
+  {
+    "loss": 1.0645,
+    "grad_norm": 0.05959143117070198,
+    "learning_rate": 6.229377380218005e-08,
+    "epoch": 0.9929078014184397,
+    "step": 280
+  },
+  {
+    "train_runtime": 4558.3131,
+    "train_samples_per_second": 0.987,
+    "train_steps_per_second": 0.062,
+    "total_flos": 7.869358305338982e+16,
+    "train_loss": 1.0875117643505123,
+    "epoch": 1.0,
+    "step": 282
+  }
+]
condition-2-ur-5k/adapter_config.json
ADDED
@@ -0,0 +1,50 @@
+{
+  "alora_invocation_tokens": null,
+  "alpha_pattern": {},
+  "arrow_config": null,
+  "auto_mapping": {
+    "base_model_class": "Cohere2ForCausalLM",
+    "parent_library": "transformers.models.cohere2.modeling_cohere2",
+    "unsloth_fixed": true
+  },
+  "base_model_name_or_path": "CohereLabs/tiny-aya-base",
+  "bias": "none",
+  "corda_config": null,
+  "ensure_weight_tying": false,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "peft_version": "0.18.1",
+  "qalora_group_size": 16,
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "v_proj",
+    "o_proj",
+    "q_proj",
+    "up_proj",
+    "down_proj",
+    "gate_proj"
+  ],
+  "target_parameters": null,
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}
condition-2-ur-5k/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:178b078bde04793fc012225157db82f0cba9df8baca7989842583f02fc7c1ee2
+size 120981200
condition-2-zh-5k/adapter_config.json
ADDED
@@ -0,0 +1,50 @@
+{
+  "alora_invocation_tokens": null,
+  "alpha_pattern": {},
+  "arrow_config": null,
+  "auto_mapping": {
+    "base_model_class": "Cohere2ForCausalLM",
+    "parent_library": "transformers.models.cohere2.modeling_cohere2",
+    "unsloth_fixed": true
+  },
+  "base_model_name_or_path": "CohereLabs/tiny-aya-base",
+  "bias": "none",
+  "corda_config": null,
+  "ensure_weight_tying": false,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "peft_version": "0.18.1",
+  "qalora_group_size": 16,
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "up_proj",
+    "o_proj",
+    "k_proj",
+    "down_proj",
+    "q_proj",
+    "gate_proj"
+  ],
+  "target_parameters": null,
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}
|
condition-2-zh-5k/adapter_model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d53fe2a8738acf06a0315971137dcdca0d5b26e1b93d410d5ae7cfb34545bf49
|
| 3 |
+
size 120981200
|
condition-2-zh-5k/training_metrics.json
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"loss": 1.2934,
|
| 4 |
+
"grad_norm": 0.06680645048618317,
|
| 5 |
+
"learning_rate": 0.00012,
|
| 6 |
+
"epoch": 0.03546099290780142,
|
| 7 |
+
"step": 10
|
| 8 |
+
},
|
| 9 |
+
{
|
| 10 |
+
"loss": 1.2413,
|
| 11 |
+
"grad_norm": 0.07441197335720062,
|
| 12 |
+
"learning_rate": 0.00019988926445681492,
|
| 13 |
+
"epoch": 0.07092198581560284,
|
| 14 |
+
"step": 20
|
| 15 |
+
},
|
| 16 |
+
{
|
| 17 |
+
"loss": 1.1801,
|
| 18 |
+
"grad_norm": 0.07900094240903854,
|
| 19 |
+
"learning_rate": 0.0001986463043917528,
|
| 20 |
+
"epoch": 0.10638297872340426,
|
| 21 |
+
"step": 30
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"loss": 1.0804,
|
| 25 |
+
"grad_norm": 0.06095115840435028,
|
| 26 |
+
"learning_rate": 0.00019603921063437793,
|
| 27 |
+
"epoch": 0.14184397163120568,
|
| 28 |
+
"step": 40
|
| 29 |
+
},
|
| 30 |
+
{
|
| 31 |
+
"loss": 1.0369,
|
| 32 |
+
"grad_norm": 0.06544255465269089,
|
| 33 |
+
"learning_rate": 0.0001921040354671897,
|
| 34 |
+
"epoch": 0.1773049645390071,
|
| 35 |
+
"step": 50
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"loss": 1.0632,
|
| 39 |
+
"grad_norm": 0.06714016199111938,
|
| 40 |
+
"learning_rate": 0.00018689519659051467,
|
| 41 |
+
"epoch": 0.2127659574468085,
|
| 42 |
+
"step": 60
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"loss": 1.0759,
|
| 46 |
+
"grad_norm": 0.061957791447639465,
|
| 47 |
+
"learning_rate": 0.00018048472460553257,
|
| 48 |
+
"epoch": 0.24822695035460993,
|
| 49 |
+
"step": 70
|
| 50 |
+
},
|
| 51 |
+
{
|
| 52 |
+
"loss": 1.1097,
|
| 53 |
+
"grad_norm": 0.09282051771879196,
|
| 54 |
+
"learning_rate": 0.00017296126693671884,
|
| 55 |
+
"epoch": 0.28368794326241137,
|
| 56 |
+
"step": 80
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"loss": 1.0426,
|
| 60 |
+
"grad_norm": 0.07323622703552246,
|
| 61 |
+
"learning_rate": 0.00016442886196799464,
|
| 62 |
+
"epoch": 0.3191489361702128,
|
| 63 |
+
"step": 90
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"loss": 1.0081,
|
| 67 |
+
"grad_norm": 0.05898305028676987,
|
| 68 |
+
"learning_rate": 0.00015500550034448413,
|
| 69 |
+
"epoch": 0.3546099290780142,
|
| 70 |
+
"step": 100
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"loss": 1.0533,
|
| 74 |
+
"grad_norm": 0.0711919292807579,
|
| 75 |
+
"learning_rate": 0.00014482149333496454,
|
| 76 |
+
"epoch": 0.3900709219858156,
|
| 77 |
+
"step": 110
|
| 78 |
+
},
|
| 79 |
+
{
|
| 80 |
+
"loss": 1.0723,
|
| 81 |
+
"grad_norm": 0.06663306057453156,
|
| 82 |
+
"learning_rate": 0.0001340176708181637,
|
| 83 |
+
"epoch": 0.425531914893617,
|
| 84 |
+
"step": 120
|
| 85 |
+
},
|
| 86 |
+
{
|
| 87 |
+
"loss": 1.0894,
|
| 88 |
+
"grad_norm": 0.07218194752931595,
|
| 89 |
+
"learning_rate": 0.00012274343381211066,
|
| 90 |
+
"epoch": 0.46099290780141844,
|
| 91 |
+
"step": 130
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"loss": 1.0284,
|
| 95 |
+
"grad_norm": 0.06280335038900375,
|
| 96 |
+
"learning_rate": 0.00011115468847720245,
|
| 97 |
+
"epoch": 0.49645390070921985,
|
| 98 |
+
"step": 140
|
| 99 |
+
},
|
| 100 |
+
{
|
| 101 |
+
"loss": 1.0339,
|
| 102 |
+
"grad_norm": 0.06357726454734802,
|
| 103 |
+
"learning_rate": 9.941169016269379e-05,
|
| 104 |
+
"epoch": 0.5319148936170213,
|
| 105 |
+
"step": 150
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"loss": 1.0422,
|
| 109 |
+
"grad_norm": 0.05644501745700836,
|
| 110 |
+
"learning_rate": 8.767682731028415e-05,
|
| 111 |
+
"epoch": 0.5673758865248227,
|
| 112 |
+
"step": 160
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"loss": 1.0903,
|
| 116 |
+
"grad_norm": 0.06660397350788116,
|
| 117 |
+
"learning_rate": 7.611237586016557e-05,
|
| 118 |
+
"epoch": 0.6028368794326241,
|
| 119 |
+
"step": 170
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"loss": 1.0108,
|
| 123 |
+
"grad_norm": 0.0638660416007042,
|
| 124 |
+
"learning_rate": 6.487825521280109e-05,
|
| 125 |
+
"epoch": 0.6382978723404256,
|
| 126 |
+
"step": 180
|
| 127 |
+
},
|
| 128 |
+
{
|
| 129 |
+
"loss": 1.0012,
|
| 130 |
+
"grad_norm": 0.06482306122779846,
|
| 131 |
+
"learning_rate": 5.4129816778190936e-05,
|
| 132 |
+
"epoch": 0.6737588652482269,
|
| 133 |
+
"step": 190
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"loss": 1.1147,
|
| 137 |
+
"grad_norm": 0.05531209707260132,
|
| 138 |
+
"learning_rate": 4.401569569374668e-05,
|
| 139 |
+
"epoch": 0.7092198581560284,
|
| 140 |
+
"step": 200
|
| 141 |
+
},
|
| 142 |
+
{
|
| 143 |
+
"loss": 1.1137,
|
| 144 |
+
"grad_norm": 0.06445838510990143,
|
| 145 |
+
"learning_rate": 3.467575541836305e-05,
|
| 146 |
+
"epoch": 0.7446808510638298,
|
| 147 |
+
"step": 210
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"loss": 1.0079,
|
| 151 |
+
"grad_norm": 0.05836805701255798,
|
| 152 |
+
"learning_rate": 2.6239153625937784e-05,
|
| 153 |
+
"epoch": 0.7801418439716312,
|
| 154 |
+
"step": 220
|
| 155 |
+
},
|
| 156 |
+
{
|
| 157 |
+
"loss": 1.0046,
|
| 158 |
+
"grad_norm": 0.0849972814321518,
|
| 159 |
+
"learning_rate": 1.882255614419376e-05,
|
| 160 |
+
"epoch": 0.8156028368794326,
|
| 161 |
+
"step": 230
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"loss": 0.9942,
|
| 165 |
+
"grad_norm": 0.06769896298646927,
|
| 166 |
+
"learning_rate": 1.2528523637410838e-05,
|
| 167 |
+
"epoch": 0.851063829787234,
|
| 168 |
+
"step": 240
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"loss": 1.1035,
|
| 172 |
+
"grad_norm": 0.07061105966567993,
|
| 173 |
+
"learning_rate": 7.4440933428779e-06,
|
| 174 |
+
"epoch": 0.8865248226950354,
|
| 175 |
+
"step": 250
|
| 176 |
+
},
|
| 177 |
+
{
|
| 178 |
+
"loss": 1.0659,
|
| 179 |
+
"grad_norm": 0.05728829279541969,
|
| 180 |
+
"learning_rate": 3.6395754735699894e-06,
|
| 181 |
+
"epoch": 0.9219858156028369,
|
| 182 |
+
"step": 260
|
| 183 |
+
},
|
| 184 |
+
{
|
| 185 |
+
"loss": 1.0457,
|
| 186 |
+
"grad_norm": 0.052473634481430054,
|
| 187 |
+
"learning_rate": 1.1675809310361497e-06,
|
| 188 |
+
"epoch": 0.9574468085106383,
|
| 189 |
+
"step": 270
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"loss": 1.0523,
|
| 193 |
+
"grad_norm": 0.05180735141038895,
|
| 194 |
+
"learning_rate": 6.229377380218005e-08,
|
| 195 |
+
"epoch": 0.9929078014184397,
|
| 196 |
+
"step": 280
|
| 197 |
+
},
|
| 198 |
+
{
|
| 199 |
+
"train_runtime": 5040.1276,
|
| 200 |
+
"train_samples_per_second": 0.893,
|
| 201 |
+
"train_steps_per_second": 0.056,
|
| 202 |
+
"total_flos": 7.869481087716557e+16,
|
| 203 |
+
"train_loss": 1.0745156301674268,
|
| 204 |
+
"epoch": 1.0,
|
| 205 |
+
"step": 282
|
| 206 |
+
}
|
| 207 |
+
]
|
condition-3-zh-5k/adapter_config.json
ADDED
@@ -0,0 +1,50 @@
+{
+  "alora_invocation_tokens": null,
+  "alpha_pattern": {},
+  "arrow_config": null,
+  "auto_mapping": {
+    "base_model_class": "Cohere2ForCausalLM",
+    "parent_library": "transformers.models.cohere2.modeling_cohere2",
+    "unsloth_fixed": true
+  },
+  "base_model_name_or_path": "CohereLabs/tiny-aya-base",
+  "bias": "none",
+  "corda_config": null,
+  "ensure_weight_tying": false,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "peft_version": "0.18.1",
+  "qalora_group_size": 16,
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o_proj",
+    "up_proj",
+    "gate_proj",
+    "q_proj",
+    "k_proj",
+    "v_proj",
+    "down_proj"
+  ],
+  "target_parameters": null,
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}
condition-3-zh-5k/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:793136a7a1478440e06697d23124679d66b0171e124e2bcdc81a68861a5f2d73
+size 120981200
condition-3-zh-5k/training_metrics.json
ADDED
@@ -0,0 +1,207 @@
+[
+  {
+    "loss": 1.3593,
+    "grad_norm": 0.07132186740636826,
+    "learning_rate": 0.00012,
+    "epoch": 0.03546099290780142,
+    "step": 10
+  },
+  {
+    "loss": 1.2212,
+    "grad_norm": 0.06576087325811386,
+    "learning_rate": 0.00019988926445681492,
+    "epoch": 0.07092198581560284,
+    "step": 20
+  },
+  {
+    "loss": 1.4382,
+    "grad_norm": 0.064721018075943,
+    "learning_rate": 0.0001986463043917528,
+    "epoch": 0.10638297872340426,
+    "step": 30
+  },
+  {
+    "loss": 1.1636,
+    "grad_norm": 0.06784526258707047,
+    "learning_rate": 0.00019603921063437793,
+    "epoch": 0.14184397163120568,
+    "step": 40
+  },
+  {
+    "loss": 1.1023,
+    "grad_norm": 0.06855457276105881,
+    "learning_rate": 0.0001921040354671897,
+    "epoch": 0.1773049645390071,
+    "step": 50
+  },
+  {
+    "loss": 1.159,
+    "grad_norm": 0.0872117206454277,
+    "learning_rate": 0.00018689519659051467,
+    "epoch": 0.2127659574468085,
+    "step": 60
+  },
+  {
+    "loss": 1.2097,
+    "grad_norm": 0.06708938628435135,
+    "learning_rate": 0.00018048472460553257,
+    "epoch": 0.24822695035460993,
+    "step": 70
+  },
+  {
+    "loss": 1.1172,
+    "grad_norm": 0.07587900757789612,
+    "learning_rate": 0.00017296126693671884,
+    "epoch": 0.28368794326241137,
+    "step": 80
+  },
+  {
+    "loss": 1.0976,
+    "grad_norm": 0.07327847182750702,
+    "learning_rate": 0.00016442886196799464,
+    "epoch": 0.3191489361702128,
+    "step": 90
+  },
+  {
+    "loss": 1.1342,
+    "grad_norm": 0.07166396826505661,
+    "learning_rate": 0.00015500550034448413,
+    "epoch": 0.3546099290780142,
+    "step": 100
+  },
+  {
+    "loss": 1.1876,
+    "grad_norm": 0.08132865279912949,
+    "learning_rate": 0.00014482149333496454,
+    "epoch": 0.3900709219858156,
+    "step": 110
+  },
+  {
+    "loss": 1.2186,
+    "grad_norm": 0.08324161171913147,
+    "learning_rate": 0.0001340176708181637,
+    "epoch": 0.425531914893617,
+    "step": 120
+  },
+  {
+    "loss": 1.2672,
+    "grad_norm": 0.08210298418998718,
+    "learning_rate": 0.00012274343381211066,
+    "epoch": 0.46099290780141844,
+    "step": 130
+  },
+  {
+    "loss": 1.1838,
+    "grad_norm": 0.06662478297948837,
+    "learning_rate": 0.00011115468847720245,
+    "epoch": 0.49645390070921985,
+    "step": 140
+  },
+  {
+    "loss": 1.161,
+    "grad_norm": 0.08010110259056091,
+    "learning_rate": 9.941169016269379e-05,
+    "epoch": 0.5319148936170213,
+    "step": 150
+  },
+  {
+    "loss": 1.2318,
+    "grad_norm": 0.09503161162137985,
+    "learning_rate": 8.767682731028415e-05,
+    "epoch": 0.5673758865248227,
+    "step": 160
+  },
+  {
+    "loss": 1.1064,
+    "grad_norm": 0.06613818556070328,
+    "learning_rate": 7.611237586016557e-05,
+    "epoch": 0.6028368794326241,
+    "step": 170
+  },
+  {
+    "loss": 1.1023,
+    "grad_norm": 0.08144712448120117,
+    "learning_rate": 6.487825521280109e-05,
+    "epoch": 0.6382978723404256,
+    "step": 180
+  },
+  {
+    "loss": 1.0777,
+    "grad_norm": 0.07447583973407745,
+    "learning_rate": 5.4129816778190936e-05,
+    "epoch": 0.6737588652482269,
+    "step": 190
+  },
+  {
+    "loss": 1.0299,
+    "grad_norm": 0.08357255160808563,
+    "learning_rate": 4.401569569374668e-05,
+    "epoch": 0.7092198581560284,
+    "step": 200
+  },
+  {
+    "loss": 1.1504,
+    "grad_norm": 0.07654684036970139,
+    "learning_rate": 3.467575541836305e-05,
+    "epoch": 0.7446808510638298,
+    "step": 210
+  },
+  {
+    "loss": 1.0784,
+    "grad_norm": 0.07995031774044037,
+    "learning_rate": 2.6239153625937784e-05,
+    "epoch": 0.7801418439716312,
+    "step": 220
+  },
+  {
+    "loss": 1.0866,
+    "grad_norm": 0.08516442775726318,
+    "learning_rate": 1.882255614419376e-05,
+    "epoch": 0.8156028368794326,
+    "step": 230
+  },
+  {
+    "loss": 1.1881,
+    "grad_norm": 0.06854885816574097,
+    "learning_rate": 1.2528523637410838e-05,
+    "epoch": 0.851063829787234,
+    "step": 240
+  },
+  {
+    "loss": 1.1803,
+    "grad_norm": 0.07308827340602875,
+    "learning_rate": 7.4440933428779e-06,
+    "epoch": 0.8865248226950354,
+    "step": 250
+  },
+  {
+    "loss": 1.252,
+    "grad_norm": 0.082114577293396,
+    "learning_rate": 3.6395754735699894e-06,
+    "epoch": 0.9219858156028369,
+    "step": 260
+  },
+  {
+    "loss": 0.9981,
+    "grad_norm": 0.08104278147220612,
+    "learning_rate": 1.1675809310361497e-06,
+    "epoch": 0.9574468085106383,
+    "step": 270
+  },
+  {
+    "loss": 1.0975,
+    "grad_norm": 0.1148340180516243,
+    "learning_rate": 6.229377380218005e-08,
+    "epoch": 0.9929078014184397,
+    "step": 280
+  },
+  {
+    "train_runtime": 5043.2773,
+    "train_samples_per_second": 0.892,
+    "train_steps_per_second": 0.056,
+    "total_flos": 7.853598969744589e+16,
+    "train_loss": 1.1630526971309743,
+    "epoch": 1.0,
+    "step": 282
+  }
+]
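With every condition in its own subdirectory, the run summaries can be compared in one pass. A minimal sketch assuming the subdirectories are present in a local checkout; it lists only the conditions that ship a training_metrics.json in this commit (condition-2-ur-5k adds adapter files only):

    import json
    from pathlib import Path

    conditions = [
        "condition-1-en-5k",
        "condition-2-es-5k",
        "condition-2-zh-5k",
        "condition-3-zh-5k",
    ]

    for name in conditions:
        # The last record in each metrics file is the run-level summary.
        summary = json.loads((Path(name) / "training_metrics.json").read_text())[-1]
        print(f"{name}: train_loss={summary['train_loss']:.4f}, "
              f"runtime={summary['train_runtime']:.0f}s")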