{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.6345957011258956,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04094165813715456,
      "grad_norm": 7.529000759124756,
      "learning_rate": 9.000000000000001e-07,
      "loss": 6.639524841308594,
      "step": 10
    },
    {
      "epoch": 0.08188331627430911,
      "grad_norm": 7.1427388191223145,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 6.582770538330078,
      "step": 20
    },
    {
      "epoch": 0.12282497441146366,
      "grad_norm": 6.251121997833252,
      "learning_rate": 2.9e-06,
      "loss": 6.114261245727539,
      "step": 30
    },
    {
      "epoch": 0.16376663254861823,
      "grad_norm": 7.014133453369141,
      "learning_rate": 3.900000000000001e-06,
      "loss": 6.333962631225586,
      "step": 40
    },
    {
      "epoch": 0.2047082906857728,
      "grad_norm": 7.072969913482666,
      "learning_rate": 4.9000000000000005e-06,
      "loss": 6.248543930053711,
      "step": 50
    },
    {
      "epoch": 0.24564994882292732,
      "grad_norm": 5.674424171447754,
      "learning_rate": 5.9e-06,
      "loss": 5.6220745086669925,
      "step": 60
    },
    {
      "epoch": 0.2865916069600819,
      "grad_norm": 5.3739800453186035,
      "learning_rate": 6.9e-06,
      "loss": 5.0251930236816404,
      "step": 70
    },
    {
      "epoch": 0.32753326509723646,
      "grad_norm": 6.372293472290039,
      "learning_rate": 7.9e-06,
      "loss": 5.033017349243164,
      "step": 80
    },
    {
      "epoch": 0.368474923234391,
      "grad_norm": 7.22106409072876,
      "learning_rate": 8.900000000000001e-06,
      "loss": 3.514456939697266,
      "step": 90
    },
    {
      "epoch": 0.4094165813715456,
      "grad_norm": 4.990958213806152,
      "learning_rate": 9.9e-06,
      "loss": 2.256962776184082,
      "step": 100
    },
    {
      "epoch": 0.4503582395087001,
      "grad_norm": 3.2661993503570557,
      "learning_rate": 9.76923076923077e-06,
      "loss": 2.0482555389404298,
      "step": 110
    },
    {
      "epoch": 0.49129989764585463,
      "grad_norm": 2.997323513031006,
      "learning_rate": 9.512820512820514e-06,
      "loss": 1.6077747344970703,
      "step": 120
    },
    {
      "epoch": 0.5322415557830092,
      "grad_norm": 2.422741413116455,
      "learning_rate": 9.256410256410257e-06,
      "loss": 1.538028621673584,
      "step": 130
    },
    {
      "epoch": 0.5731832139201638,
      "grad_norm": 2.4594290256500244,
      "learning_rate": 9e-06,
      "loss": 1.5614049911499024,
      "step": 140
    },
    {
      "epoch": 0.6141248720573184,
      "grad_norm": 2.7256577014923096,
      "learning_rate": 8.743589743589743e-06,
      "loss": 1.7180768966674804,
      "step": 150
    },
    {
      "epoch": 0.6550665301944729,
      "grad_norm": 2.6614902019500732,
      "learning_rate": 8.487179487179488e-06,
      "loss": 1.6412044525146485,
      "step": 160
    },
    {
      "epoch": 0.6960081883316275,
      "grad_norm": 2.1517934799194336,
      "learning_rate": 8.230769230769232e-06,
      "loss": 1.729467010498047,
      "step": 170
    },
    {
      "epoch": 0.736949846468782,
      "grad_norm": 2.4588229656219482,
      "learning_rate": 7.974358974358975e-06,
      "loss": 1.305363941192627,
      "step": 180
    },
    {
      "epoch": 0.7778915046059366,
      "grad_norm": 2.5282061100006104,
      "learning_rate": 7.717948717948718e-06,
      "loss": 1.4986873626708985,
      "step": 190
    },
    {
      "epoch": 0.8188331627430911,
      "grad_norm": 3.699396848678589,
      "learning_rate": 7.461538461538462e-06,
      "loss": 1.5059691429138184,
      "step": 200
    },
    {
      "epoch": 0.8597748208802457,
      "grad_norm": 2.2632241249084473,
      "learning_rate": 7.205128205128206e-06,
      "loss": 1.4249659538269044,
      "step": 210
    },
    {
      "epoch": 0.9007164790174002,
      "grad_norm": 1.6525421142578125,
      "learning_rate": 6.948717948717949e-06,
      "loss": 1.2684237480163574,
      "step": 220
    },
    {
      "epoch": 0.9416581371545547,
      "grad_norm": 4.144842624664307,
      "learning_rate": 6.692307692307692e-06,
      "loss": 1.3331901550292968,
      "step": 230
    },
    {
      "epoch": 0.9825997952917093,
      "grad_norm": 4.287148475646973,
      "learning_rate": 6.435897435897437e-06,
      "loss": 1.3455743789672852,
      "step": 240
    },
    {
      "epoch": 1.0204708290685773,
      "grad_norm": 1.7868481874465942,
      "learning_rate": 6.17948717948718e-06,
      "loss": 1.2180957794189453,
      "step": 250
    },
    {
      "epoch": 1.0614124872057318,
      "grad_norm": 3.4123318195343018,
      "learning_rate": 5.923076923076924e-06,
      "loss": 1.0844655990600587,
      "step": 260
    },
    {
      "epoch": 1.1023541453428864,
      "grad_norm": 2.9107143878936768,
      "learning_rate": 5.666666666666667e-06,
      "loss": 1.2759632110595702,
      "step": 270
    },
    {
      "epoch": 1.143295803480041,
      "grad_norm": 2.166071653366089,
      "learning_rate": 5.41025641025641e-06,
      "loss": 1.2902444839477538,
      "step": 280
    },
    {
      "epoch": 1.1842374616171956,
      "grad_norm": 3.7286741733551025,
      "learning_rate": 5.1538461538461534e-06,
      "loss": 1.1132170677185058,
      "step": 290
    },
    {
      "epoch": 1.22517911975435,
      "grad_norm": 3.644495964050293,
      "learning_rate": 4.8974358974358975e-06,
      "loss": 1.3006702423095704,
      "step": 300
    },
    {
      "epoch": 1.2661207778915047,
      "grad_norm": 2.9774715900421143,
      "learning_rate": 4.641025641025642e-06,
      "loss": 1.3277738571166993,
      "step": 310
    },
    {
      "epoch": 1.3070624360286591,
      "grad_norm": 3.5428693294525146,
      "learning_rate": 4.384615384615385e-06,
      "loss": 1.2050466537475586,
      "step": 320
    },
    {
      "epoch": 1.3480040941658138,
      "grad_norm": 2.9129176139831543,
      "learning_rate": 4.128205128205128e-06,
      "loss": 1.0705522537231444,
      "step": 330
    },
    {
      "epoch": 1.3889457523029682,
      "grad_norm": 3.6323633193969727,
      "learning_rate": 3.871794871794872e-06,
      "loss": 1.1811490058898926,
      "step": 340
    },
    {
      "epoch": 1.429887410440123,
      "grad_norm": 2.344156265258789,
      "learning_rate": 3.6153846153846156e-06,
      "loss": 1.2702527046203613,
      "step": 350
    },
    {
      "epoch": 1.4708290685772774,
      "grad_norm": 2.93733286857605,
      "learning_rate": 3.358974358974359e-06,
      "loss": 1.2678390502929688,
      "step": 360
    },
    {
      "epoch": 1.511770726714432,
      "grad_norm": 3.3147552013397217,
      "learning_rate": 3.102564102564103e-06,
      "loss": 0.975819206237793,
      "step": 370
    },
    {
      "epoch": 1.5527123848515865,
      "grad_norm": 3.5217061042785645,
      "learning_rate": 2.846153846153846e-06,
      "loss": 1.5488268852233886,
      "step": 380
    },
    {
      "epoch": 1.593654042988741,
      "grad_norm": 2.5279595851898193,
      "learning_rate": 2.5897435897435903e-06,
      "loss": 1.1411288261413575,
      "step": 390
    },
    {
      "epoch": 1.6345957011258956,
      "grad_norm": 2.848078489303589,
      "learning_rate": 2.3333333333333336e-06,
      "loss": 1.1716268539428711,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 490,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1685665056100352e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}