{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 0,
  "global_step": 63,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.047619047619047616,
      "grad_norm": 271.2847900390625,
      "learning_rate": 0.0,
      "loss": 15.6328,
      "step": 1
    },
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 283.8895263671875,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 15.7266,
      "step": 2
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 239.6302490234375,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 14.4062,
      "step": 3
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 131.42355346679688,
      "learning_rate": 8.571428571428571e-06,
      "loss": 6.6289,
      "step": 4
    },
    {
      "epoch": 0.23809523809523808,
      "grad_norm": 26.378475189208984,
      "learning_rate": 1.1428571428571429e-05,
      "loss": 0.9219,
      "step": 5
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 10.474356651306152,
      "learning_rate": 1.4285714285714287e-05,
      "loss": 0.4797,
      "step": 6
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 2.809896469116211,
      "learning_rate": 1.7142857142857142e-05,
      "loss": 0.1807,
      "step": 7
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 2.836686372756958,
      "learning_rate": 2e-05,
      "loss": 0.1663,
      "step": 8
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 1.031554937362671,
      "learning_rate": 1.998426815017817e-05,
      "loss": 0.1649,
      "step": 9
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 3.8881802558898926,
      "learning_rate": 1.9937122098932428e-05,
      "loss": 0.1857,
      "step": 10
    },
    {
      "epoch": 0.5238095238095238,
      "grad_norm": 2.4832403659820557,
      "learning_rate": 1.985871018518236e-05,
      "loss": 0.1583,
      "step": 11
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 2.085007429122925,
      "learning_rate": 1.9749279121818235e-05,
      "loss": 0.1541,
      "step": 12
    },
    {
      "epoch": 0.6190476190476191,
      "grad_norm": 2.2945327758789062,
      "learning_rate": 1.9609173219450998e-05,
      "loss": 0.1553,
      "step": 13
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 2.169069290161133,
      "learning_rate": 1.9438833303083677e-05,
      "loss": 0.1539,
      "step": 14
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 1.0928995609283447,
      "learning_rate": 1.9238795325112867e-05,
      "loss": 0.1472,
      "step": 15
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 2.304161548614502,
      "learning_rate": 1.900968867902419e-05,
      "loss": 0.1589,
      "step": 16
    },
    {
      "epoch": 0.8095238095238095,
      "grad_norm": 1.644942283630371,
      "learning_rate": 1.8752234219087538e-05,
      "loss": 0.1449,
      "step": 17
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.959534764289856,
      "learning_rate": 1.8467241992282842e-05,
      "loss": 0.155,
      "step": 18
    },
    {
      "epoch": 0.9047619047619048,
      "grad_norm": 21.322084426879883,
      "learning_rate": 1.8155608689592604e-05,
      "loss": 0.2281,
      "step": 19
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 1.6131975650787354,
      "learning_rate": 1.78183148246803e-05,
      "loss": 0.1748,
      "step": 20
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.9911768436431885,
      "learning_rate": 1.7456421648831658e-05,
      "loss": 0.1499,
      "step": 21
    },
    {
      "epoch": 1.0476190476190477,
      "grad_norm": 0.7157345414161682,
      "learning_rate": 1.7071067811865477e-05,
      "loss": 0.1598,
      "step": 22
    },
    {
      "epoch": 1.0952380952380953,
      "grad_norm": 1.3037433624267578,
      "learning_rate": 1.6663465779520042e-05,
      "loss": 0.1428,
      "step": 23
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 1.650227427482605,
      "learning_rate": 1.6234898018587336e-05,
      "loss": 0.1381,
      "step": 24
    },
    {
      "epoch": 1.1904761904761905,
      "grad_norm": 1.1550507545471191,
      "learning_rate": 1.578671296179806e-05,
      "loss": 0.126,
      "step": 25
    },
    {
      "epoch": 1.2380952380952381,
      "grad_norm": 0.8351575136184692,
      "learning_rate": 1.5320320765153367e-05,
      "loss": 0.1112,
      "step": 26
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 1.8693701028823853,
      "learning_rate": 1.4837188871052399e-05,
      "loss": 0.1158,
      "step": 27
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 1.883232593536377,
      "learning_rate": 1.4338837391175582e-05,
      "loss": 0.0951,
      "step": 28
    },
    {
      "epoch": 1.380952380952381,
      "grad_norm": 2.0021462440490723,
      "learning_rate": 1.3826834323650899e-05,
      "loss": 0.141,
      "step": 29
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.9316145181655884,
      "learning_rate": 1.3302790619551673e-05,
      "loss": 0.1143,
      "step": 30
    },
    {
      "epoch": 1.4761904761904763,
      "grad_norm": 0.6722294092178345,
      "learning_rate": 1.2768355114248493e-05,
      "loss": 0.0856,
      "step": 31
    },
    {
      "epoch": 1.5238095238095237,
      "grad_norm": 3.3709585666656494,
      "learning_rate": 1.2225209339563144e-05,
      "loss": 0.1282,
      "step": 32
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 3.494358539581299,
      "learning_rate": 1.1675062233047365e-05,
      "loss": 0.1166,
      "step": 33
    },
    {
      "epoch": 1.619047619047619,
      "grad_norm": 1.0232586860656738,
      "learning_rate": 1.1119644761033079e-05,
      "loss": 0.1171,
      "step": 34
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.7489449977874756,
      "learning_rate": 1.0560704472371919e-05,
      "loss": 0.0844,
      "step": 35
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 1.0576767921447754,
      "learning_rate": 1e-05,
      "loss": 0.1084,
      "step": 36
    },
    {
      "epoch": 1.7619047619047619,
      "grad_norm": 1.014384150505066,
      "learning_rate": 9.439295527628083e-06,
      "loss": 0.0855,
      "step": 37
    },
    {
      "epoch": 1.8095238095238095,
      "grad_norm": 0.956558108329773,
      "learning_rate": 8.880355238966923e-06,
      "loss": 0.0793,
      "step": 38
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 0.9556608200073242,
      "learning_rate": 8.324937766952638e-06,
      "loss": 0.0726,
      "step": 39
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 2.3250770568847656,
      "learning_rate": 7.774790660436857e-06,
      "loss": 0.0802,
      "step": 40
    },
    {
      "epoch": 1.9523809523809523,
      "grad_norm": 2.596691131591797,
      "learning_rate": 7.2316448857515076e-06,
      "loss": 0.1185,
      "step": 41
    },
    {
      "epoch": 2.0,
      "grad_norm": 2.510233163833618,
      "learning_rate": 6.697209380448333e-06,
      "loss": 0.0885,
      "step": 42
    },
    {
      "epoch": 2.0476190476190474,
      "grad_norm": 1.380808711051941,
      "learning_rate": 6.173165676349103e-06,
      "loss": 0.0732,
      "step": 43
    },
    {
      "epoch": 2.0952380952380953,
      "grad_norm": 1.4121789932250977,
      "learning_rate": 5.66116260882442e-06,
      "loss": 0.0781,
      "step": 44
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 1.6490198373794556,
      "learning_rate": 5.1628111289476025e-06,
      "loss": 0.0627,
      "step": 45
    },
    {
      "epoch": 2.1904761904761907,
      "grad_norm": 0.9158168435096741,
      "learning_rate": 4.679679234846636e-06,
      "loss": 0.0486,
      "step": 46
    },
    {
      "epoch": 2.238095238095238,
      "grad_norm": 1.6034945249557495,
      "learning_rate": 4.213287038201943e-06,
      "loss": 0.0543,
      "step": 47
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 1.2905007600784302,
      "learning_rate": 3.7651019814126656e-06,
      "loss": 0.0587,
      "step": 48
    },
    {
      "epoch": 2.3333333333333335,
      "grad_norm": 0.971811830997467,
      "learning_rate": 3.3365342204799613e-06,
      "loss": 0.0772,
      "step": 49
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 1.3265019655227661,
      "learning_rate": 2.9289321881345257e-06,
      "loss": 0.069,
      "step": 50
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 1.0910696983337402,
      "learning_rate": 2.5435783511683444e-06,
      "loss": 0.0531,
      "step": 51
    },
    {
      "epoch": 2.4761904761904763,
      "grad_norm": 0.5556491017341614,
      "learning_rate": 2.1816851753197023e-06,
      "loss": 0.0735,
      "step": 52
    },
    {
      "epoch": 2.5238095238095237,
      "grad_norm": 0.8411810398101807,
      "learning_rate": 1.8443913104073984e-06,
      "loss": 0.0399,
      "step": 53
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 0.5246292948722839,
      "learning_rate": 1.5327580077171589e-06,
      "loss": 0.0299,
      "step": 54
    },
    {
      "epoch": 2.619047619047619,
      "grad_norm": 0.4854477643966675,
      "learning_rate": 1.2477657809124632e-06,
      "loss": 0.0348,
      "step": 55
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.8015491962432861,
      "learning_rate": 9.903113209758098e-07,
      "loss": 0.0342,
      "step": 56
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 1.3755743503570557,
      "learning_rate": 7.612046748871327e-07,
      "loss": 0.046,
      "step": 57
    },
    {
      "epoch": 2.761904761904762,
      "grad_norm": 0.956333339214325,
      "learning_rate": 5.611666969163243e-07,
      "loss": 0.0662,
      "step": 58
    },
    {
      "epoch": 2.8095238095238093,
      "grad_norm": 0.7447959184646606,
      "learning_rate": 3.908267805490051e-07,
      "loss": 0.0401,
      "step": 59
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 1.1069077253341675,
      "learning_rate": 2.507208781817638e-07,
      "loss": 0.0717,
      "step": 60
    },
    {
      "epoch": 2.9047619047619047,
      "grad_norm": 0.6235014200210571,
      "learning_rate": 1.4128981481764115e-07,
      "loss": 0.0464,
      "step": 61
    },
    {
      "epoch": 2.9523809523809526,
      "grad_norm": 0.7081692218780518,
      "learning_rate": 6.287790106757396e-08,
      "loss": 0.0389,
      "step": 62
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.5594448447227478,
      "learning_rate": 1.5731849821833955e-08,
      "loss": 0.0408,
      "step": 63
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 63,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 0,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.211049111532339e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}