{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 430,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.7280812841653823,
      "epoch": 0.05815644082582146,
      "grad_norm": 0.05810546875,
      "learning_rate": 0.00019965681014237917,
      "loss": 1.8778582763671876,
      "mean_token_accuracy": 0.5898587119579315,
      "num_tokens": 130892.0,
      "step": 25
    },
    {
      "entropy": 1.728554719053209,
      "epoch": 0.11631288165164291,
      "grad_norm": 0.056884765625,
      "learning_rate": 0.00019634456691705702,
      "loss": 1.728631591796875,
      "mean_token_accuracy": 0.6158029875904322,
      "num_tokens": 260746.0,
      "step": 50
    },
    {
      "entropy": 1.4874446502327918,
      "epoch": 0.17446932247746438,
      "grad_norm": 0.07177734375,
      "learning_rate": 0.00018962470758421342,
      "loss": 1.4888288879394531,
      "mean_token_accuracy": 0.6532380232214927,
      "num_tokens": 391099.0,
      "step": 75
    },
    {
      "entropy": 1.4625397424772382,
      "epoch": 0.23262576330328583,
      "grad_norm": 0.0771484375,
      "learning_rate": 0.00017973490720571864,
      "loss": 1.4681800842285155,
      "mean_token_accuracy": 0.66225466825068,
      "num_tokens": 518575.0,
      "step": 100
    },
    {
      "entropy": 1.3695158807188272,
      "epoch": 0.2907822041291073,
      "grad_norm": 0.10791015625,
      "learning_rate": 0.0001670249586567531,
      "loss": 1.3395758056640625,
      "mean_token_accuracy": 0.680420500189066,
      "num_tokens": 646614.0,
      "step": 125
    },
    {
      "entropy": 1.2654121196083725,
      "epoch": 0.34893864495492877,
      "grad_norm": 0.087890625,
      "learning_rate": 0.00015194440078301536,
      "loss": 1.2487395477294922,
      "mean_token_accuracy": 0.7102896820753812,
      "num_tokens": 772188.0,
      "step": 150
    },
    {
      "entropy": 1.2020046729780733,
      "epoch": 0.4070950857807502,
      "grad_norm": 0.1015625,
      "learning_rate": 0.00013502661863739793,
      "loss": 1.2076020050048828,
      "mean_token_accuracy": 0.7211254305392504,
      "num_tokens": 896970.0,
      "step": 175
    },
    {
      "entropy": 1.1874629692733287,
      "epoch": 0.46525152660657165,
      "grad_norm": 0.080078125,
      "learning_rate": 0.00011686997815570473,
      "loss": 1.150387954711914,
      "mean_token_accuracy": 0.7204084581136704,
      "num_tokens": 1023028.0,
      "step": 200
    },
    {
      "entropy": 1.1370409649983049,
      "epoch": 0.5234079674323932,
      "grad_norm": 0.10009765625,
      "learning_rate": 9.811666252006742e-05,
      "loss": 1.1151661682128906,
      "mean_token_accuracy": 0.730134856402874,
      "num_tokens": 1148105.0,
      "step": 225
    },
    {
      "entropy": 1.205468089338392,
      "epoch": 0.5815644082582146,
      "grad_norm": 0.07177734375,
      "learning_rate": 7.942995874784776e-05,
      "loss": 1.1882852935791015,
      "mean_token_accuracy": 0.7171100111305714,
      "num_tokens": 1277988.0,
      "step": 250
    },
    {
      "entropy": 1.1937428190652282,
      "epoch": 0.639720849084036,
      "grad_norm": 0.08203125,
      "learning_rate": 6.147079785787038e-05,
      "loss": 1.1849015045166016,
      "mean_token_accuracy": 0.7218351965397596,
      "num_tokens": 1407507.0,
      "step": 275
    },
    {
      "entropy": 1.0787318773008883,
      "epoch": 0.6978772899098575,
      "grad_norm": 0.0869140625,
      "learning_rate": 4.4874378366092476e-05,
      "loss": 1.067366943359375,
      "mean_token_accuracy": 0.749751016125083,
      "num_tokens": 1535191.0,
      "step": 300
    },
    {
      "entropy": 1.06566724085249,
      "epoch": 0.7560337307356789,
      "grad_norm": 0.08740234375,
      "learning_rate": 3.0227699915535367e-05,
      "loss": 1.0334744262695312,
      "mean_token_accuracy": 0.7519774179905653,
      "num_tokens": 1664985.0,
      "step": 325
    },
    {
      "entropy": 1.0932638369407506,
      "epoch": 0.8141901715615004,
      "grad_norm": 0.0732421875,
      "learning_rate": 1.8048801654714688e-05,
      "loss": 1.0842971801757812,
      "mean_token_accuracy": 0.7464394801110029,
      "num_tokens": 1793824.0,
      "step": 350
    },
    {
      "entropy": 1.067505011325702,
      "epoch": 0.8723466123873219,
      "grad_norm": 0.08837890625,
      "learning_rate": 8.768439683464868e-06,
      "loss": 1.0524615478515624,
      "mean_token_accuracy": 0.7513109978288413,
      "num_tokens": 1920684.0,
      "step": 375
    },
    {
      "entropy": 1.092575399177149,
      "epoch": 0.9305030532131433,
      "grad_norm": 0.06201171875,
      "learning_rate": 2.7148516175519277e-06,
      "loss": 1.114994430541992,
      "mean_token_accuracy": 0.7517227891087532,
      "num_tokens": 2054940.0,
      "step": 400
    },
    {
      "entropy": 1.1259987969137728,
      "epoch": 0.9886594940389648,
      "grad_norm": 0.08544921875,
      "learning_rate": 1.0214713499706597e-07,
      "loss": 1.1549060821533204,
      "mean_token_accuracy": 0.7383282145857811,
      "num_tokens": 2182777.0,
      "step": 425
    }
  ],
  "logging_steps": 25,
  "max_steps": 430,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.421398094196122e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}