{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 79.55762481689453,
      "learning_rate": 4.9786121534345265e-05,
      "loss": 45.253,
      "num_input_tokens_seen": 47512,
      "step": 5
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 17.002260208129883,
      "learning_rate": 4.914814565722671e-05,
      "loss": 3.0254,
      "num_input_tokens_seen": 94536,
      "step": 10
    },
    {
      "epoch": 1.0,
      "grad_norm": 35.45703887939453,
      "learning_rate": 4.8096988312782174e-05,
      "loss": 3.1619,
      "num_input_tokens_seen": 141336,
      "step": 15
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 32.79413604736328,
      "learning_rate": 4.665063509461097e-05,
      "loss": 1.7871,
      "num_input_tokens_seen": 188368,
      "step": 20
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 57.80039978027344,
      "learning_rate": 4.4833833507280884e-05,
      "loss": 2.643,
      "num_input_tokens_seen": 235128,
      "step": 25
    },
    {
      "epoch": 2.0,
      "grad_norm": 8.35395622253418,
      "learning_rate": 4.267766952966369e-05,
      "loss": 2.3801,
      "num_input_tokens_seen": 282672,
      "step": 30
    },
    {
      "epoch": 2.3333333333333335,
      "grad_norm": 13.160042762756348,
      "learning_rate": 4.021903572521802e-05,
      "loss": 1.6999,
      "num_input_tokens_seen": 329864,
      "step": 35
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 31.193811416625977,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.5571,
      "num_input_tokens_seen": 377952,
      "step": 40
    },
    {
      "epoch": 3.0,
      "grad_norm": 57.113609313964844,
      "learning_rate": 3.456708580912725e-05,
      "loss": 1.7335,
      "num_input_tokens_seen": 424008,
      "step": 45
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 26.844955444335938,
      "learning_rate": 3.147047612756302e-05,
      "loss": 1.1859,
      "num_input_tokens_seen": 471344,
      "step": 50
    },
    {
      "epoch": 3.6666666666666665,
      "grad_norm": 19.28535270690918,
      "learning_rate": 2.8263154805501297e-05,
      "loss": 0.9846,
      "num_input_tokens_seen": 517832,
      "step": 55
    },
    {
      "epoch": 4.0,
      "grad_norm": 16.303863525390625,
      "learning_rate": 2.5e-05,
      "loss": 0.901,
      "num_input_tokens_seen": 565344,
      "step": 60
    },
    {
      "epoch": 4.333333333333333,
      "grad_norm": 38.277183532714844,
      "learning_rate": 2.173684519449872e-05,
      "loss": 0.4142,
      "num_input_tokens_seen": 612336,
      "step": 65
    },
    {
      "epoch": 4.666666666666667,
      "grad_norm": 16.51125144958496,
      "learning_rate": 1.852952387243698e-05,
      "loss": 0.6221,
      "num_input_tokens_seen": 659560,
      "step": 70
    },
    {
      "epoch": 5.0,
      "grad_norm": 55.58554458618164,
      "learning_rate": 1.5432914190872757e-05,
      "loss": 0.7115,
      "num_input_tokens_seen": 706680,
      "step": 75
    },
    {
      "epoch": 5.333333333333333,
      "grad_norm": 7.757297992706299,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.1582,
      "num_input_tokens_seen": 754048,
      "step": 80
    },
    {
      "epoch": 5.666666666666667,
      "grad_norm": 6.108384609222412,
      "learning_rate": 9.780964274781984e-06,
      "loss": 0.1891,
      "num_input_tokens_seen": 800984,
      "step": 85
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.9440665245056152,
      "learning_rate": 7.3223304703363135e-06,
      "loss": 0.2214,
      "num_input_tokens_seen": 848016,
      "step": 90
    },
    {
      "epoch": 6.333333333333333,
      "grad_norm": 0.9317387342453003,
      "learning_rate": 5.166166492719124e-06,
      "loss": 0.0135,
      "num_input_tokens_seen": 895040,
      "step": 95
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 0.1581590473651886,
      "learning_rate": 3.3493649053890326e-06,
      "loss": 0.0055,
      "num_input_tokens_seen": 941960,
      "step": 100
    },
    {
      "epoch": 7.0,
      "grad_norm": 1.00675368309021,
      "learning_rate": 1.9030116872178316e-06,
      "loss": 0.031,
      "num_input_tokens_seen": 989352,
      "step": 105
    },
    {
      "epoch": 7.333333333333333,
      "grad_norm": 0.6310182213783264,
      "learning_rate": 8.51854342773295e-07,
      "loss": 0.0085,
      "num_input_tokens_seen": 1035760,
      "step": 110
    },
    {
      "epoch": 7.666666666666667,
      "grad_norm": 0.24195235967636108,
      "learning_rate": 2.1387846565474045e-07,
      "loss": 0.0032,
      "num_input_tokens_seen": 1083624,
      "step": 115
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.020462460815906525,
      "learning_rate": 0.0,
      "loss": 0.0005,
      "num_input_tokens_seen": 1130688,
      "step": 120
    },
    {
      "epoch": 8.0,
      "num_input_tokens_seen": 1130688,
      "step": 120,
      "total_flos": 1.7959027775641805e+17,
      "train_loss": 2.86213872662629,
      "train_runtime": 778.6794,
      "train_samples_per_second": 1.233,
      "train_steps_per_second": 0.154
    }
  ],
  "logging_steps": 5,
  "max_steps": 120,
  "num_input_tokens_seen": 1130688,
  "num_train_epochs": 8,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7959027775641805e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}