{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 15000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "grad_norm": 31.439960479736328,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 1.3922,
      "step": 500
    },
    {
      "epoch": 0.2,
      "grad_norm": 2.689053535461426,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.6572,
      "step": 1000
    },
    {
      "epoch": 0.3,
      "grad_norm": 9.427984237670898,
      "learning_rate": 5e-05,
      "loss": 0.5827,
      "step": 1500
    },
    {
      "epoch": 0.4,
      "grad_norm": 12.102423667907715,
      "learning_rate": 4.983095894354858e-05,
      "loss": 0.5335,
      "step": 2000
    },
    {
      "epoch": 0.5,
      "grad_norm": 13.714262008666992,
      "learning_rate": 4.9326121764495596e-05,
      "loss": 0.51,
      "step": 2500
    },
    {
      "epoch": 0.6,
      "grad_norm": 13.829536437988281,
      "learning_rate": 4.849231551964771e-05,
      "loss": 0.4889,
      "step": 3000
    },
    {
      "epoch": 0.7,
      "grad_norm": 18.672182083129883,
      "learning_rate": 4.734081600808531e-05,
      "loss": 0.4614,
      "step": 3500
    },
    {
      "epoch": 0.8,
      "grad_norm": 16.94382667541504,
      "learning_rate": 4.588719528532342e-05,
      "loss": 0.4214,
      "step": 4000
    },
    {
      "epoch": 0.9,
      "grad_norm": 13.588418960571289,
      "learning_rate": 4.415111107797445e-05,
      "loss": 0.4399,
      "step": 4500
    },
    {
      "epoch": 1.0,
      "grad_norm": 12.39639949798584,
      "learning_rate": 4.215604094671835e-05,
      "loss": 0.4323,
      "step": 5000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.83475,
      "eval_loss": 0.4028127193450928,
      "eval_runtime": 224.488,
      "eval_samples_per_second": 17.818,
      "eval_steps_per_second": 2.227,
      "step": 5000
    },
    {
      "epoch": 1.1,
      "grad_norm": 19.143348693847656,
      "learning_rate": 3.9928964792569655e-05,
      "loss": 0.3163,
      "step": 5500
    },
    {
      "epoch": 1.2,
      "grad_norm": 22.87167739868164,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.333,
      "step": 6000
    },
    {
      "epoch": 1.3,
      "grad_norm": 2.1354031562805176,
      "learning_rate": 3.490199415097892e-05,
      "loss": 0.3329,
      "step": 6500
    },
    {
      "epoch": 1.4,
      "grad_norm": 21.64975357055664,
      "learning_rate": 3.217008081777726e-05,
      "loss": 0.327,
      "step": 7000
    },
    {
      "epoch": 1.5,
      "grad_norm": 15.015271186828613,
      "learning_rate": 2.9341204441673266e-05,
      "loss": 0.307,
      "step": 7500
    },
    {
      "epoch": 1.6,
      "grad_norm": 7.016956329345703,
      "learning_rate": 2.6453620722761896e-05,
      "loss": 0.3177,
      "step": 8000
    },
    {
      "epoch": 1.7,
      "grad_norm": 7.591048240661621,
      "learning_rate": 2.3546379277238107e-05,
      "loss": 0.289,
      "step": 8500
    },
    {
      "epoch": 1.8,
      "grad_norm": 89.22419738769531,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 0.3074,
      "step": 9000
    },
    {
      "epoch": 1.9,
      "grad_norm": 1.194404125213623,
      "learning_rate": 1.7829919182222752e-05,
      "loss": 0.2857,
      "step": 9500
    },
    {
      "epoch": 2.0,
      "grad_norm": 6.9914679527282715,
      "learning_rate": 1.509800584902108e-05,
      "loss": 0.2904,
      "step": 10000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.87,
      "eval_loss": 0.534140408039093,
      "eval_runtime": 224.6782,
      "eval_samples_per_second": 17.803,
      "eval_steps_per_second": 2.225,
      "step": 10000
    },
    {
      "epoch": 2.1,
      "grad_norm": 95.4480972290039,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.0852,
      "step": 10500
    },
    {
      "epoch": 2.2,
      "grad_norm": 52.108890533447266,
      "learning_rate": 1.0071035207430352e-05,
      "loss": 0.1166,
      "step": 11000
    },
    {
      "epoch": 2.3,
      "grad_norm": 0.004855601117014885,
      "learning_rate": 7.843959053281663e-06,
      "loss": 0.1055,
      "step": 11500
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.0002819446672219783,
      "learning_rate": 5.848888922025553e-06,
      "loss": 0.0829,
      "step": 12000
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.0005901407566852868,
      "learning_rate": 4.112804714676594e-06,
      "loss": 0.0864,
      "step": 12500
    },
    {
      "epoch": 2.6,
      "grad_norm": 0.0008024872513487935,
      "learning_rate": 2.659183991914696e-06,
      "loss": 0.0878,
      "step": 13000
    },
    {
      "epoch": 2.7,
      "grad_norm": 0.0009197063627652824,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 0.0806,
      "step": 13500
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.007043409161269665,
      "learning_rate": 6.738782355044049e-07,
      "loss": 0.0964,
      "step": 14000
    },
    {
      "epoch": 2.9,
      "grad_norm": 0.1506282240152359,
      "learning_rate": 1.6904105645142444e-07,
      "loss": 0.0856,
      "step": 14500
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.014456248842179775,
      "learning_rate": 0.0,
      "loss": 0.0835,
      "step": 15000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8755,
      "eval_loss": 0.9834522604942322,
      "eval_runtime": 224.9074,
      "eval_samples_per_second": 17.785,
      "eval_steps_per_second": 2.223,
      "step": 15000
    }
  ],
  "logging_steps": 500,
  "max_steps": 15000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}