{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 0,
  "global_step": 63,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.047619047619047616,
      "grad_norm": 135.75784301757812,
      "learning_rate": 0.0,
      "loss": 10.6484,
      "step": 1
    },
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 135.6968536376953,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 10.6641,
      "step": 2
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 135.83665466308594,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 9.9766,
      "step": 3
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 57.94816589355469,
      "learning_rate": 8.571428571428571e-06,
      "loss": 3.3945,
      "step": 4
    },
    {
      "epoch": 0.23809523809523808,
      "grad_norm": 27.795312881469727,
      "learning_rate": 1.1428571428571429e-05,
      "loss": 0.8257,
      "step": 5
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 13.902595520019531,
      "learning_rate": 1.4285714285714287e-05,
      "loss": 0.4709,
      "step": 6
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 7.484889984130859,
      "learning_rate": 1.7142857142857142e-05,
      "loss": 0.1921,
      "step": 7
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 2.4155075550079346,
      "learning_rate": 2e-05,
      "loss": 0.1349,
      "step": 8
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 3.3865394592285156,
      "learning_rate": 1.998426815017817e-05,
      "loss": 0.1702,
      "step": 9
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 0.4871467053890228,
      "learning_rate": 1.9937122098932428e-05,
      "loss": 0.1534,
      "step": 10
    },
    {
      "epoch": 0.5238095238095238,
      "grad_norm": 0.9217754602432251,
      "learning_rate": 1.985871018518236e-05,
      "loss": 0.1285,
      "step": 11
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 1.245365023612976,
      "learning_rate": 1.9749279121818235e-05,
      "loss": 0.1144,
      "step": 12
    },
    {
      "epoch": 0.6190476190476191,
      "grad_norm": 4.214899063110352,
      "learning_rate": 1.9609173219450998e-05,
      "loss": 0.1703,
      "step": 13
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 1.2503230571746826,
      "learning_rate": 1.9438833303083677e-05,
      "loss": 0.1322,
      "step": 14
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.74677973985672,
      "learning_rate": 1.9238795325112867e-05,
      "loss": 0.1243,
      "step": 15
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 1.4760054349899292,
      "learning_rate": 1.900968867902419e-05,
      "loss": 0.1299,
      "step": 16
    },
    {
      "epoch": 0.8095238095238095,
      "grad_norm": 0.6457348465919495,
      "learning_rate": 1.8752234219087538e-05,
      "loss": 0.129,
      "step": 17
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.7069640755653381,
      "learning_rate": 1.8467241992282842e-05,
      "loss": 0.0992,
      "step": 18
    },
    {
      "epoch": 0.9047619047619048,
      "grad_norm": 0.7505813837051392,
      "learning_rate": 1.8155608689592604e-05,
      "loss": 0.0886,
      "step": 19
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 1.2884610891342163,
      "learning_rate": 1.78183148246803e-05,
      "loss": 0.115,
      "step": 20
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.54263436794281,
      "learning_rate": 1.7456421648831658e-05,
      "loss": 0.1154,
      "step": 21
    },
    {
      "epoch": 1.0476190476190477,
      "grad_norm": 1.1725558042526245,
      "learning_rate": 1.7071067811865477e-05,
      "loss": 0.0932,
      "step": 22
    },
    {
      "epoch": 1.0952380952380953,
      "grad_norm": 0.6754504442214966,
      "learning_rate": 1.6663465779520042e-05,
      "loss": 0.0907,
      "step": 23
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.7802339196205139,
      "learning_rate": 1.6234898018587336e-05,
      "loss": 0.1005,
      "step": 24
    },
    {
      "epoch": 1.1904761904761905,
      "grad_norm": 0.7150468230247498,
      "learning_rate": 1.578671296179806e-05,
      "loss": 0.0739,
      "step": 25
    },
    {
      "epoch": 1.2380952380952381,
      "grad_norm": 1.3305799961090088,
      "learning_rate": 1.5320320765153367e-05,
      "loss": 0.0825,
      "step": 26
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 1.3834118843078613,
      "learning_rate": 1.4837188871052399e-05,
      "loss": 0.0818,
      "step": 27
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 1.0374525785446167,
      "learning_rate": 1.4338837391175582e-05,
      "loss": 0.068,
      "step": 28
    },
    {
      "epoch": 1.380952380952381,
      "grad_norm": 0.7237675786018372,
      "learning_rate": 1.3826834323650899e-05,
      "loss": 0.0616,
      "step": 29
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.36288100481033325,
      "learning_rate": 1.3302790619551673e-05,
      "loss": 0.0495,
      "step": 30
    },
    {
      "epoch": 1.4761904761904763,
      "grad_norm": 1.4335401058197021,
      "learning_rate": 1.2768355114248493e-05,
      "loss": 0.0898,
      "step": 31
    },
    {
      "epoch": 1.5238095238095237,
      "grad_norm": 0.8492462635040283,
      "learning_rate": 1.2225209339563144e-05,
      "loss": 0.0441,
      "step": 32
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 0.5347748398780823,
      "learning_rate": 1.1675062233047365e-05,
      "loss": 0.049,
      "step": 33
    },
    {
      "epoch": 1.619047619047619,
      "grad_norm": 0.7263269424438477,
      "learning_rate": 1.1119644761033079e-05,
      "loss": 0.0894,
      "step": 34
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.4866464138031006,
      "learning_rate": 1.0560704472371919e-05,
      "loss": 0.0464,
      "step": 35
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.6330035328865051,
      "learning_rate": 1e-05,
      "loss": 0.0498,
      "step": 36
    },
    {
      "epoch": 1.7619047619047619,
      "grad_norm": 0.7745473384857178,
      "learning_rate": 9.439295527628083e-06,
      "loss": 0.0573,
      "step": 37
    },
    {
      "epoch": 1.8095238095238095,
      "grad_norm": 0.5323076844215393,
      "learning_rate": 8.880355238966923e-06,
      "loss": 0.0571,
      "step": 38
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 0.4111030101776123,
      "learning_rate": 8.324937766952638e-06,
      "loss": 0.0349,
      "step": 39
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 0.8452009558677673,
      "learning_rate": 7.774790660436857e-06,
      "loss": 0.0701,
      "step": 40
    },
    {
      "epoch": 1.9523809523809523,
      "grad_norm": 0.6247994899749756,
      "learning_rate": 7.2316448857515076e-06,
      "loss": 0.0537,
      "step": 41
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.2583736479282379,
      "learning_rate": 6.697209380448333e-06,
      "loss": 0.0371,
      "step": 42
    },
    {
      "epoch": 2.0476190476190474,
      "grad_norm": 0.3620803654193878,
      "learning_rate": 6.173165676349103e-06,
      "loss": 0.0321,
      "step": 43
    },
    {
      "epoch": 2.0952380952380953,
      "grad_norm": 0.5690107941627502,
      "learning_rate": 5.66116260882442e-06,
      "loss": 0.0545,
      "step": 44
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 0.3473432958126068,
      "learning_rate": 5.1628111289476025e-06,
      "loss": 0.0253,
      "step": 45
    },
    {
      "epoch": 2.1904761904761907,
      "grad_norm": 0.7409687042236328,
      "learning_rate": 4.679679234846636e-06,
      "loss": 0.0504,
      "step": 46
    },
    {
      "epoch": 2.238095238095238,
      "grad_norm": 0.37499386072158813,
      "learning_rate": 4.213287038201943e-06,
      "loss": 0.0336,
      "step": 47
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.26719093322753906,
      "learning_rate": 3.7651019814126656e-06,
      "loss": 0.0186,
      "step": 48
    },
    {
      "epoch": 2.3333333333333335,
      "grad_norm": 0.3391411602497101,
      "learning_rate": 3.3365342204799613e-06,
      "loss": 0.0282,
      "step": 49
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 0.3170095682144165,
      "learning_rate": 2.9289321881345257e-06,
      "loss": 0.0251,
      "step": 50
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 0.2487771213054657,
      "learning_rate": 2.5435783511683444e-06,
      "loss": 0.0274,
      "step": 51
    },
    {
      "epoch": 2.4761904761904763,
      "grad_norm": 0.4128139019012451,
      "learning_rate": 2.1816851753197023e-06,
      "loss": 0.0292,
      "step": 52
    },
    {
      "epoch": 2.5238095238095237,
      "grad_norm": 0.6280130743980408,
      "learning_rate": 1.8443913104073984e-06,
      "loss": 0.0423,
      "step": 53
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 0.48291048407554626,
      "learning_rate": 1.5327580077171589e-06,
      "loss": 0.022,
      "step": 54
    },
    {
      "epoch": 2.619047619047619,
      "grad_norm": 0.508919358253479,
      "learning_rate": 1.2477657809124632e-06,
      "loss": 0.0444,
      "step": 55
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.3738269805908203,
      "learning_rate": 9.903113209758098e-07,
      "loss": 0.0291,
      "step": 56
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 0.25037050247192383,
      "learning_rate": 7.612046748871327e-07,
      "loss": 0.0239,
      "step": 57
    },
    {
      "epoch": 2.761904761904762,
      "grad_norm": 0.30901408195495605,
      "learning_rate": 5.611666969163243e-07,
      "loss": 0.021,
      "step": 58
    },
    {
      "epoch": 2.8095238095238093,
      "grad_norm": 0.6213399767875671,
      "learning_rate": 3.908267805490051e-07,
      "loss": 0.0327,
      "step": 59
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.2997397780418396,
      "learning_rate": 2.507208781817638e-07,
      "loss": 0.0316,
      "step": 60
    },
    {
      "epoch": 2.9047619047619047,
      "grad_norm": 0.3204822540283203,
      "learning_rate": 1.4128981481764115e-07,
      "loss": 0.0299,
      "step": 61
    },
    {
      "epoch": 2.9523809523809526,
      "grad_norm": 0.39552855491638184,
      "learning_rate": 6.287790106757396e-08,
      "loss": 0.0235,
      "step": 62
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.5192532539367676,
      "learning_rate": 1.5731849821833955e-08,
      "loss": 0.0399,
      "step": 63
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 63,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 0,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0854328659083264e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}