| { |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 2.9972041006523766, |
| "eval_steps": 500, |
| "global_step": 201, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.03, |
| "learning_rate": 5.7142857142857145e-06, |
| "loss": 2.2584, |
| "step": 2 |
| }, |
| { |
| "epoch": 0.06, |
| "learning_rate": 1.1428571428571429e-05, |
| "loss": 1.676, |
| "step": 4 |
| }, |
| { |
| "epoch": 0.09, |
| "learning_rate": 1.7142857142857142e-05, |
| "loss": 1.6463, |
| "step": 6 |
| }, |
| { |
| "epoch": 0.12, |
| "learning_rate": 1.9998688836656322e-05, |
| "loss": 1.584, |
| "step": 8 |
| }, |
| { |
| "epoch": 0.15, |
| "learning_rate": 1.998820159279591e-05, |
| "loss": 1.4092, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.18, |
| "learning_rate": 1.9967238104745695e-05, |
| "loss": 1.7832, |
| "step": 12 |
| }, |
| { |
| "epoch": 0.21, |
| "learning_rate": 1.993582036030978e-05, |
| "loss": 2.0932, |
| "step": 14 |
| }, |
| { |
| "epoch": 0.24, |
| "learning_rate": 1.9893981312363563e-05, |
| "loss": 2.524, |
| "step": 16 |
| }, |
| { |
| "epoch": 0.27, |
| "learning_rate": 1.9841764844290744e-05, |
| "loss": 1.9114, |
| "step": 18 |
| }, |
| { |
| "epoch": 0.3, |
| "learning_rate": 1.977922572395571e-05, |
| "loss": 1.5524, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.33, |
| "learning_rate": 1.9706429546259592e-05, |
| "loss": 1.6035, |
| "step": 22 |
| }, |
| { |
| "epoch": 0.36, |
| "learning_rate": 1.9623452664340305e-05, |
| "loss": 1.5664, |
| "step": 24 |
| }, |
| { |
| "epoch": 0.39, |
| "learning_rate": 1.953038210948861e-05, |
| "loss": 1.6138, |
| "step": 26 |
| }, |
| { |
| "epoch": 0.42, |
| "learning_rate": 1.9427315499864345e-05, |
| "loss": 1.9132, |
| "step": 28 |
| }, |
| { |
| "epoch": 0.45, |
| "learning_rate": 1.9314360938108427e-05, |
| "loss": 2.2387, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.48, |
| "learning_rate": 1.9191636897958123e-05, |
| "loss": 2.4396, |
| "step": 32 |
| }, |
| { |
| "epoch": 0.51, |
| "learning_rate": 1.905927209998447e-05, |
| "loss": 2.2283, |
| "step": 34 |
| }, |
| { |
| "epoch": 0.54, |
| "learning_rate": 1.8917405376582144e-05, |
| "loss": 1.8121, |
| "step": 36 |
| }, |
| { |
| "epoch": 0.57, |
| "learning_rate": 1.876618552635348e-05, |
| "loss": 1.552, |
| "step": 38 |
| }, |
| { |
| "epoch": 0.6, |
| "learning_rate": 1.8605771158039253e-05, |
| "loss": 1.5683, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.63, |
| "learning_rate": 1.8436330524160048e-05, |
| "loss": 1.5337, |
| "step": 42 |
| }, |
| { |
| "epoch": 0.66, |
| "learning_rate": 1.8258041344542567e-05, |
| "loss": 1.8445, |
| "step": 44 |
| }, |
| { |
| "epoch": 0.69, |
| "learning_rate": 1.8071090619916095e-05, |
| "loss": 2.0517, |
| "step": 46 |
| }, |
| { |
| "epoch": 0.72, |
| "learning_rate": 1.7875674435774546e-05, |
| "loss": 2.3032, |
| "step": 48 |
| }, |
| { |
| "epoch": 0.75, |
| "learning_rate": 1.767199775670986e-05, |
| "loss": 2.3695, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.78, |
| "learning_rate": 1.7460274211432463e-05, |
| "loss": 1.611, |
| "step": 52 |
| }, |
| { |
| "epoch": 0.81, |
| "learning_rate": 1.7240725868704218e-05, |
| "loss": 1.4434, |
| "step": 54 |
| }, |
| { |
| "epoch": 0.84, |
| "learning_rate": 1.7013583004418994e-05, |
| "loss": 1.6615, |
| "step": 56 |
| }, |
| { |
| "epoch": 0.86, |
| "learning_rate": 1.6779083860075032e-05, |
| "loss": 1.5693, |
| "step": 58 |
| }, |
| { |
| "epoch": 0.89, |
| "learning_rate": 1.6537474392892527e-05, |
| "loss": 1.5989, |
| "step": 60 |
| }, |
| { |
| "epoch": 0.92, |
| "learning_rate": 1.6289008017838447e-05, |
| "loss": 1.8186, |
| "step": 62 |
| }, |
| { |
| "epoch": 0.95, |
| "learning_rate": 1.603394534182925e-05, |
| "loss": 2.0993, |
| "step": 64 |
| }, |
| { |
| "epoch": 0.98, |
| "learning_rate": 1.5772553890390196e-05, |
| "loss": 1.5693, |
| "step": 66 |
| }, |
| { |
| "epoch": 1.01, |
| "learning_rate": 1.5505107827058038e-05, |
| "loss": 2.1505, |
| "step": 68 |
| }, |
| { |
| "epoch": 1.04, |
| "learning_rate": 1.52318876658213e-05, |
| "loss": 1.7584, |
| "step": 70 |
| }, |
| { |
| "epoch": 1.07, |
| "learning_rate": 1.4953179976899878e-05, |
| "loss": 1.4229, |
| "step": 72 |
| }, |
| { |
| "epoch": 1.1, |
| "learning_rate": 1.4669277086172406e-05, |
| "loss": 1.611, |
| "step": 74 |
| }, |
| { |
| "epoch": 1.13, |
| "learning_rate": 1.4380476768566825e-05, |
| "loss": 1.4103, |
| "step": 76 |
| }, |
| { |
| "epoch": 1.16, |
| "learning_rate": 1.4087081935735565e-05, |
| "loss": 1.4992, |
| "step": 78 |
| }, |
| { |
| "epoch": 1.19, |
| "learning_rate": 1.378940031834307e-05, |
| "loss": 1.7046, |
| "step": 80 |
| }, |
| { |
| "epoch": 1.22, |
| "learning_rate": 1.3487744143298822e-05, |
| "loss": 1.9403, |
| "step": 82 |
| }, |
| { |
| "epoch": 1.25, |
| "learning_rate": 1.3182429806274442e-05, |
| "loss": 1.9072, |
| "step": 84 |
| }, |
| { |
| "epoch": 1.28, |
| "learning_rate": 1.2873777539848284e-05, |
| "loss": 1.7842, |
| "step": 86 |
| }, |
| { |
| "epoch": 1.31, |
| "learning_rate": 1.2562111077625723e-05, |
| "loss": 1.5809, |
| "step": 88 |
| }, |
| { |
| "epoch": 1.34, |
| "learning_rate": 1.2247757314687296e-05, |
| "loss": 1.4814, |
| "step": 90 |
| }, |
| { |
| "epoch": 1.37, |
| "learning_rate": 1.1931045964720882e-05, |
| "loss": 1.4703, |
| "step": 92 |
| }, |
| { |
| "epoch": 1.4, |
| "learning_rate": 1.1612309214197599e-05, |
| "loss": 1.4649, |
| "step": 94 |
| }, |
| { |
| "epoch": 1.43, |
| "learning_rate": 1.1291881373954066e-05, |
| "loss": 1.6343, |
| "step": 96 |
| }, |
| { |
| "epoch": 1.46, |
| "learning_rate": 1.0970098528546482e-05, |
| "loss": 1.7428, |
| "step": 98 |
| }, |
| { |
| "epoch": 1.49, |
| "learning_rate": 1.0647298183744359e-05, |
| "loss": 1.7637, |
| "step": 100 |
| }, |
| { |
| "epoch": 1.52, |
| "learning_rate": 1.0323818912533561e-05, |
| "loss": 2.1005, |
| "step": 102 |
| }, |
| { |
| "epoch": 1.55, |
| "learning_rate": 1e-05, |
| "loss": 1.4387, |
| "step": 104 |
| }, |
| { |
| "epoch": 1.58, |
| "learning_rate": 9.676181087466444e-06, |
| "loss": 1.422, |
| "step": 106 |
| }, |
| { |
| "epoch": 1.61, |
| "learning_rate": 9.352701816255643e-06, |
| "loss": 1.3836, |
| "step": 108 |
| }, |
| { |
| "epoch": 1.64, |
| "learning_rate": 9.02990147145352e-06, |
| "loss": 1.2649, |
| "step": 110 |
| }, |
| { |
| "epoch": 1.67, |
| "learning_rate": 8.708118626045939e-06, |
| "loss": 1.5681, |
| "step": 112 |
| }, |
| { |
| "epoch": 1.7, |
| "learning_rate": 8.387690785802403e-06, |
| "loss": 1.8252, |
| "step": 114 |
| }, |
| { |
| "epoch": 1.73, |
| "learning_rate": 8.068954035279121e-06, |
| "loss": 1.8779, |
| "step": 116 |
| }, |
| { |
| "epoch": 1.76, |
| "learning_rate": 7.752242685312709e-06, |
| "loss": 2.0095, |
| "step": 118 |
| }, |
| { |
| "epoch": 1.79, |
| "learning_rate": 7.4378889223742766e-06, |
| "loss": 1.4449, |
| "step": 120 |
| }, |
| { |
| "epoch": 1.82, |
| "learning_rate": 7.126222460151719e-06, |
| "loss": 1.4738, |
| "step": 122 |
| }, |
| { |
| "epoch": 1.85, |
| "learning_rate": 6.8175701937255645e-06, |
| "loss": 1.3797, |
| "step": 124 |
| }, |
| { |
| "epoch": 1.88, |
| "learning_rate": 6.5122558567011775e-06, |
| "loss": 1.3042, |
| "step": 126 |
| }, |
| { |
| "epoch": 1.91, |
| "learning_rate": 6.210599681656933e-06, |
| "loss": 1.5955, |
| "step": 128 |
| }, |
| { |
| "epoch": 1.94, |
| "learning_rate": 5.912918064264441e-06, |
| "loss": 1.7519, |
| "step": 130 |
| }, |
| { |
| "epoch": 1.97, |
| "learning_rate": 5.619523231433177e-06, |
| "loss": 1.3313, |
| "step": 132 |
| }, |
| { |
| "epoch": 2.0, |
| "learning_rate": 5.330722913827594e-06, |
| "loss": 1.5147, |
| "step": 134 |
| }, |
| { |
| "epoch": 2.03, |
| "learning_rate": 5.046820023100129e-06, |
| "loss": 2.0083, |
| "step": 136 |
| }, |
| { |
| "epoch": 2.06, |
| "learning_rate": 4.7681123341787e-06, |
| "loss": 1.4886, |
| "step": 138 |
| }, |
| { |
| "epoch": 2.09, |
| "learning_rate": 4.494892172941965e-06, |
| "loss": 1.4057, |
| "step": 140 |
| }, |
| { |
| "epoch": 2.12, |
| "learning_rate": 4.2274461096098085e-06, |
| "loss": 1.3837, |
| "step": 142 |
| }, |
| { |
| "epoch": 2.15, |
| "learning_rate": 3.966054658170754e-06, |
| "loss": 1.438, |
| "step": 144 |
| }, |
| { |
| "epoch": 2.18, |
| "learning_rate": 3.7109919821615546e-06, |
| "loss": 1.3849, |
| "step": 146 |
| }, |
| { |
| "epoch": 2.21, |
| "learning_rate": 3.4625256071074776e-06, |
| "loss": 1.5841, |
| "step": 148 |
| }, |
| { |
| "epoch": 2.24, |
| "learning_rate": 3.2209161399249677e-06, |
| "loss": 1.4959, |
| "step": 150 |
| }, |
| { |
| "epoch": 2.27, |
| "learning_rate": 2.9864169955810085e-06, |
| "loss": 2.3643, |
| "step": 152 |
| }, |
| { |
| "epoch": 2.3, |
| "learning_rate": 2.759274131295787e-06, |
| "loss": 1.6805, |
| "step": 154 |
| }, |
| { |
| "epoch": 2.33, |
| "learning_rate": 2.5397257885675396e-06, |
| "loss": 1.3571, |
| "step": 156 |
| }, |
| { |
| "epoch": 2.36, |
| "learning_rate": 2.328002243290138e-06, |
| "loss": 1.4686, |
| "step": 158 |
| }, |
| { |
| "epoch": 2.39, |
| "learning_rate": 2.124325564225458e-06, |
| "loss": 1.2663, |
| "step": 160 |
| }, |
| { |
| "epoch": 2.42, |
| "learning_rate": 1.9289093800839067e-06, |
| "loss": 1.4305, |
| "step": 162 |
| }, |
| { |
| "epoch": 2.45, |
| "learning_rate": 1.7419586554574364e-06, |
| "loss": 1.5805, |
| "step": 164 |
| }, |
| { |
| "epoch": 2.48, |
| "learning_rate": 1.5636694758399563e-06, |
| "loss": 1.5783, |
| "step": 166 |
| }, |
| { |
| "epoch": 2.51, |
| "learning_rate": 1.3942288419607476e-06, |
| "loss": 2.0266, |
| "step": 168 |
| }, |
| { |
| "epoch": 2.53, |
| "learning_rate": 1.233814473646524e-06, |
| "loss": 1.4056, |
| "step": 170 |
| }, |
| { |
| "epoch": 2.56, |
| "learning_rate": 1.0825946234178575e-06, |
| "loss": 1.3858, |
| "step": 172 |
| }, |
| { |
| "epoch": 2.59, |
| "learning_rate": 9.407279000155311e-07, |
| "loss": 1.5584, |
| "step": 174 |
| }, |
| { |
| "epoch": 2.62, |
| "learning_rate": 8.083631020418792e-07, |
| "loss": 1.2246, |
| "step": 176 |
| }, |
| { |
| "epoch": 2.65, |
| "learning_rate": 6.856390618915775e-07, |
| "loss": 1.3859, |
| "step": 178 |
| }, |
| { |
| "epoch": 2.68, |
| "learning_rate": 5.726845001356573e-07, |
| "loss": 1.5712, |
| "step": 180 |
| }, |
| { |
| "epoch": 2.71, |
| "learning_rate": 4.696178905113913e-07, |
| "loss": 1.6031, |
| "step": 182 |
| }, |
| { |
| "epoch": 2.74, |
| "learning_rate": 3.7654733565969826e-07, |
| "loss": 2.1996, |
| "step": 184 |
| }, |
| { |
| "epoch": 2.77, |
| "learning_rate": 2.935704537404083e-07, |
| "loss": 1.4927, |
| "step": 186 |
| }, |
| { |
| "epoch": 2.8, |
| "learning_rate": 2.2077427604429435e-07, |
| "loss": 1.479, |
| "step": 188 |
| }, |
| { |
| "epoch": 2.83, |
| "learning_rate": 1.5823515570925763e-07, |
| "loss": 1.3484, |
| "step": 190 |
| }, |
| { |
| "epoch": 2.86, |
| "learning_rate": 1.0601868763643997e-07, |
| "loss": 1.1359, |
| "step": 192 |
| }, |
| { |
| "epoch": 2.89, |
| "learning_rate": 6.417963969022389e-08, |
| "loss": 1.5236, |
| "step": 194 |
| }, |
| { |
| "epoch": 2.92, |
| "learning_rate": 3.2761895254306285e-08, |
| "loss": 1.6316, |
| "step": 196 |
| }, |
| { |
| "epoch": 2.95, |
| "learning_rate": 1.179840720409331e-08, |
| "loss": 1.5152, |
| "step": 198 |
| }, |
| { |
| "epoch": 2.98, |
| "learning_rate": 1.3111633436779792e-09, |
| "loss": 1.2889, |
| "step": 200 |
| } |
| ], |
| "logging_steps": 2, |
| "max_steps": 201, |
| "num_train_epochs": 3, |
| "save_steps": 500, |
| "total_flos": 1.1940452642598912e+16, |
| "trial_name": null, |
| "trial_params": null |
| } |