{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 100,
"global_step": 252,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11940298507462686,
"grad_norm": 0.2085999995470047,
"learning_rate": 3.461538461538462e-05,
"loss": 0.5328742027282715,
"step": 10
},
{
"epoch": 0.23880597014925373,
"grad_norm": 0.17245711386203766,
"learning_rate": 7.307692307692307e-05,
"loss": 0.28172030448913576,
"step": 20
},
{
"epoch": 0.3582089552238806,
"grad_norm": 0.1451202780008316,
"learning_rate": 9.995652871840006e-05,
"loss": 0.22340788841247558,
"step": 30
},
{
"epoch": 0.47761194029850745,
"grad_norm": 0.12613239884376526,
"learning_rate": 9.918580696928205e-05,
"loss": 0.19026432037353516,
"step": 40
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.10020812600851059,
"learning_rate": 9.746617916180905e-05,
"loss": 0.1776690363883972,
"step": 50
},
{
"epoch": 0.7164179104477612,
"grad_norm": 0.11177698522806168,
"learning_rate": 9.483082082658983e-05,
"loss": 0.16183719635009766,
"step": 60
},
{
"epoch": 0.835820895522388,
"grad_norm": 0.11473167687654495,
"learning_rate": 9.133057401836159e-05,
"loss": 0.18729711771011354,
"step": 70
},
{
"epoch": 0.9552238805970149,
"grad_norm": 0.09883058816194534,
"learning_rate": 8.703296645714609e-05,
"loss": 0.17510976791381835,
"step": 80
},
{
"epoch": 1.0716417910447762,
"grad_norm": 0.09465258568525314,
"learning_rate": 8.202090876498144e-05,
"loss": 0.15757997035980226,
"step": 90
},
{
"epoch": 1.191044776119403,
"grad_norm": 0.15254873037338257,
"learning_rate": 7.639109493149537e-05,
"loss": 0.14641659259796141,
"step": 100
},
{
"epoch": 1.191044776119403,
"eval_loss": 0.1869809627532959,
"eval_runtime": 13.646,
"eval_samples_per_second": 1.099,
"eval_steps_per_second": 0.586,
"step": 100
},
{
"epoch": 1.31044776119403,
"grad_norm": 0.08413254469633102,
"learning_rate": 7.02521368669762e-05,
"loss": 0.11394469738006592,
"step": 110
},
{
"epoch": 1.4298507462686567,
"grad_norm": 0.07326336950063705,
"learning_rate": 6.372246903165446e-05,
"loss": 0.11726579666137696,
"step": 120
},
{
"epoch": 1.5492537313432835,
"grad_norm": 0.10139299929141998,
"learning_rate": 5.692806356566095e-05,
"loss": 0.12615565061569214,
"step": 130
},
{
"epoch": 1.6686567164179105,
"grad_norm": 0.08335951715707779,
"learning_rate": 5e-05,
"loss": 0.11287055015563965,
"step": 140
},
{
"epoch": 1.7880597014925375,
"grad_norm": 0.07054048031568527,
"learning_rate": 4.307193643433907e-05,
"loss": 0.10663604736328125,
"step": 150
},
{
"epoch": 1.9074626865671642,
"grad_norm": 0.06786229461431503,
"learning_rate": 3.627753096834555e-05,
"loss": 0.107212233543396,
"step": 160
},
{
"epoch": 2.023880597014925,
"grad_norm": 0.07071981579065323,
"learning_rate": 2.9747863133023803e-05,
"loss": 0.11782596111297608,
"step": 170
},
{
"epoch": 2.1432835820895524,
"grad_norm": 0.09423048794269562,
"learning_rate": 2.3608905068504637e-05,
"loss": 0.0834022581577301,
"step": 180
},
{
"epoch": 2.262686567164179,
"grad_norm": 0.09003670513629913,
"learning_rate": 1.7979091235018565e-05,
"loss": 0.08206533193588257,
"step": 190
},
{
"epoch": 2.382089552238806,
"grad_norm": 0.09670283645391464,
"learning_rate": 1.2967033542853917e-05,
"loss": 0.08110529780387879,
"step": 200
},
{
"epoch": 2.382089552238806,
"eval_loss": 0.1738630086183548,
"eval_runtime": 13.5313,
"eval_samples_per_second": 1.109,
"eval_steps_per_second": 0.591,
"step": 200
},
{
"epoch": 2.5014925373134327,
"grad_norm": 0.09121183305978775,
"learning_rate": 8.669425981638413e-06,
"loss": 0.08261927366256713,
"step": 210
},
{
"epoch": 2.62089552238806,
"grad_norm": 0.09510096907615662,
"learning_rate": 5.169179173410177e-06,
"loss": 0.08233516216278076,
"step": 220
},
{
"epoch": 2.7402985074626867,
"grad_norm": 0.07058050483465195,
"learning_rate": 2.533820838190959e-06,
"loss": 0.07825937271118164,
"step": 230
},
{
"epoch": 2.8597014925373134,
"grad_norm": 0.09087616205215454,
"learning_rate": 8.141930307179468e-07,
"loss": 0.08194915056228638,
"step": 240
},
{
"epoch": 2.97910447761194,
"grad_norm": 0.078521728515625,
"learning_rate": 4.3471281599938295e-08,
"loss": 0.08224834203720092,
"step": 250
},
{
"epoch": 3.0,
"step": 252,
"total_flos": 1.3584580095246336e+16,
"train_loss": 0.14724678906892974,
"train_runtime": 4599.332,
"train_samples_per_second": 0.437,
"train_steps_per_second": 0.055
}
],
"logging_steps": 10,
"max_steps": 252,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3584580095246336e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}