{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.437636761487965,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02188183807439825,
      "grad_norm": 7.38808464543784,
      "learning_rate": 6.666666666666666e-07,
      "loss": 1.957,
      "step": 10
    },
    {
      "epoch": 0.0437636761487965,
      "grad_norm": 6.694792139315802,
      "learning_rate": 1.3333333333333332e-06,
      "loss": 1.8366,
      "step": 20
    },
    {
      "epoch": 0.06564551422319474,
      "grad_norm": 2.753054563296277,
      "learning_rate": 2e-06,
      "loss": 1.6834,
      "step": 30
    },
    {
      "epoch": 0.087527352297593,
      "grad_norm": 3.0196829311173383,
      "learning_rate": 1.997294683476273e-06,
      "loss": 1.5792,
      "step": 40
    },
    {
      "epoch": 0.10940919037199125,
      "grad_norm": 2.043213920847318,
      "learning_rate": 1.9891933713800795e-06,
      "loss": 1.5421,
      "step": 50
    },
    {
      "epoch": 0.13129102844638948,
      "grad_norm": 1.9290165973717828,
      "learning_rate": 1.975739896938375e-06,
      "loss": 1.4976,
      "step": 60
    },
    {
      "epoch": 0.15317286652078774,
      "grad_norm": 1.8958297962498376,
      "learning_rate": 1.9570070519645765e-06,
      "loss": 1.4696,
      "step": 70
    },
    {
      "epoch": 0.175054704595186,
      "grad_norm": 3.538308772432866,
      "learning_rate": 1.9330961930087725e-06,
      "loss": 1.4409,
      "step": 80
    },
    {
      "epoch": 0.19693654266958424,
      "grad_norm": 2.1951202653523803,
      "learning_rate": 1.9041366929546216e-06,
      "loss": 1.4601,
      "step": 90
    },
    {
      "epoch": 0.2188183807439825,
      "grad_norm": 2.1497845944555514,
      "learning_rate": 1.8702852410301553e-06,
      "loss": 1.4162,
      "step": 100
    },
    {
      "epoch": 0.24070021881838075,
      "grad_norm": 1.8478185733675352,
      "learning_rate": 1.8317249950198596e-06,
      "loss": 1.4207,
      "step": 110
    },
    {
      "epoch": 0.26258205689277897,
      "grad_norm": 2.0111628068672567,
      "learning_rate": 1.7886645902651164e-06,
      "loss": 1.3857,
      "step": 120
    },
    {
      "epoch": 0.2844638949671772,
      "grad_norm": 4.441684060204544,
      "learning_rate": 1.7413370108149287e-06,
      "loss": 1.3821,
      "step": 130
    },
    {
      "epoch": 0.3063457330415755,
      "grad_norm": 1.9878264739514673,
      "learning_rate": 1.6899983288347247e-06,
      "loss": 1.3559,
      "step": 140
    },
    {
      "epoch": 0.3282275711159737,
      "grad_norm": 2.0597952266115915,
      "learning_rate": 1.63492631909384e-06,
      "loss": 1.3683,
      "step": 150
    },
    {
      "epoch": 0.350109409190372,
      "grad_norm": 1.8085416648968597,
      "learning_rate": 1.5764189560281674e-06,
      "loss": 1.3484,
      "step": 160
    },
    {
      "epoch": 0.37199124726477023,
      "grad_norm": 1.9540811212682783,
      "learning_rate": 1.5147928015098307e-06,
      "loss": 1.3377,
      "step": 170
    },
    {
      "epoch": 0.3938730853391685,
      "grad_norm": 1.896162081853637,
      "learning_rate": 1.4503812920470533e-06,
      "loss": 1.3267,
      "step": 180
    },
    {
      "epoch": 0.41575492341356673,
      "grad_norm": 1.824280170852011,
      "learning_rate": 1.3835329346815714e-06,
      "loss": 1.3259,
      "step": 190
    },
    {
      "epoch": 0.437636761487965,
      "grad_norm": 1.8853200942482258,
      "learning_rate": 1.3146094213449146e-06,
      "loss": 1.3293,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 457,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 32614907904000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}