{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.5236270803213119,
      "epoch": 0.05561735261401557,
      "grad_norm": 0.055419921875,
      "learning_rate": 0.00019974051702905277,
      "loss": 1.5805291748046875,
      "mean_token_accuracy": 0.647714718580246,
      "num_tokens": 143908.0,
      "step": 25
    },
    {
      "entropy": 1.3212773679941892,
      "epoch": 0.11123470522803114,
      "grad_norm": 0.10400390625,
      "learning_rate": 0.00019683677672505253,
      "loss": 1.2404356384277344,
      "mean_token_accuracy": 0.7040169030427933,
      "num_tokens": 286606.0,
      "step": 50
    },
    {
      "entropy": 1.1558487902022898,
      "epoch": 0.1668520578420467,
      "grad_norm": 0.08984375,
      "learning_rate": 0.0001907992282510675,
      "loss": 1.0775267791748047,
      "mean_token_accuracy": 0.7465412376821041,
      "num_tokens": 420820.0,
      "step": 75
    },
    {
      "entropy": 1.0273668716661633,
      "epoch": 0.22246941045606228,
      "grad_norm": 0.08642578125,
      "learning_rate": 0.00018182325727950777,
      "loss": 0.9585573577880859,
      "mean_token_accuracy": 0.7694233341515064,
      "num_tokens": 556416.0,
      "step": 100
    },
    {
      "entropy": 0.8720588740799576,
      "epoch": 0.27808676307007785,
      "grad_norm": 0.09033203125,
      "learning_rate": 0.00017019934199557867,
      "loss": 0.7967613983154297,
      "mean_token_accuracy": 0.8086028082668781,
      "num_tokens": 693255.0,
      "step": 125
    },
    {
      "entropy": 0.7958288640156388,
      "epoch": 0.3337041156840934,
      "grad_norm": 0.08642578125,
      "learning_rate": 0.00015630365271303827,
      "loss": 0.6878538513183594,
      "mean_token_accuracy": 0.8273787525296211,
      "num_tokens": 830019.0,
      "step": 150
    },
    {
      "entropy": 0.7475133097125217,
      "epoch": 0.389321468298109,
      "grad_norm": 0.0791015625,
      "learning_rate": 0.00014058587834217355,
      "loss": 0.6619260406494141,
      "mean_token_accuracy": 0.8369621601700783,
      "num_tokens": 970194.0,
      "step": 175
    },
    {
      "entropy": 0.6880824944004417,
      "epoch": 0.44493882091212456,
      "grad_norm": 0.0927734375,
      "learning_rate": 0.00012355467366687156,
      "loss": 0.6069028854370118,
      "mean_token_accuracy": 0.8497222004830838,
      "num_tokens": 1106500.0,
      "step": 200
    },
    {
      "entropy": 0.6797102421335875,
      "epoch": 0.5005561735261401,
      "grad_norm": 0.08935546875,
      "learning_rate": 0.00010576119838245844,
      "loss": 0.6194157028198242,
      "mean_token_accuracy": 0.851585813164711,
      "num_tokens": 1248886.0,
      "step": 225
    },
    {
      "entropy": 0.6410426250938326,
      "epoch": 0.5561735261401557,
      "grad_norm": 0.0576171875,
      "learning_rate": 8.778128059995269e-05,
      "loss": 0.5677408218383789,
      "mean_token_accuracy": 0.8584357848763466,
      "num_tokens": 1393484.0,
      "step": 250
    },
    {
      "entropy": 0.49390312943141906,
      "epoch": 0.6117908787541713,
      "grad_norm": 0.03466796875,
      "learning_rate": 7.019678203706163e-05,
      "loss": 0.4471506881713867,
      "mean_token_accuracy": 0.890991186350584,
      "num_tokens": 1537897.0,
      "step": 275
    },
    {
      "entropy": 0.6916981361201033,
      "epoch": 0.6674082313681868,
      "grad_norm": 0.12353515625,
      "learning_rate": 5.357676795106799e-05,
      "loss": 0.6350007247924805,
      "mean_token_accuracy": 0.8496893347799778,
      "num_tokens": 1675749.0,
      "step": 300
    },
    {
      "entropy": 0.6515753491269425,
      "epoch": 0.7230255839822024,
      "grad_norm": 0.0751953125,
      "learning_rate": 3.845909118765073e-05,
      "loss": 0.6029189682006836,
      "mean_token_accuracy": 0.8555391871929169,
      "num_tokens": 1815517.0,
      "step": 325
    },
    {
      "entropy": 0.6985861426126212,
      "epoch": 0.778642936596218,
      "grad_norm": 0.0390625,
      "learning_rate": 2.5332986318163886e-05,
      "loss": 0.6293030548095703,
      "mean_token_accuracy": 0.8477095763385296,
      "num_tokens": 1955691.0,
      "step": 350
    },
    {
      "entropy": 0.6039358280133456,
      "epoch": 0.8342602892102335,
      "grad_norm": 0.06494140625,
      "learning_rate": 1.462323714966114e-05,
      "loss": 0.5598350524902344,
      "mean_token_accuracy": 0.8694348326325416,
      "num_tokens": 2096304.0,
      "step": 375
    },
    {
      "entropy": 0.6356105864001438,
      "epoch": 0.8898776418242491,
      "grad_norm": 0.049560546875,
      "learning_rate": 6.676429974848563e-06,
      "loss": 0.6045819091796875,
      "mean_token_accuracy": 0.8612590564787388,
      "num_tokens": 2230962.0,
      "step": 400
    },
    {
      "entropy": 0.6548895966866985,
      "epoch": 0.9454949944382648,
      "grad_norm": 0.07861328125,
      "learning_rate": 1.7497374309405346e-06,
      "loss": 0.5994021224975586,
      "mean_token_accuracy": 0.8573913918435574,
      "num_tokens": 2364095.0,
      "step": 425
    },
    {
      "entropy": 0.6505621709737318,
      "epoch": 1.0,
      "grad_norm": 0.08837890625,
      "learning_rate": 2.5959414452270124e-09,
      "loss": 0.6095585632324219,
      "mean_token_accuracy": 0.8590780609116262,
      "num_tokens": 2497006.0,
      "step": 450
    }
  ],
  "logging_steps": 25,
  "max_steps": 450,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0653709786799923e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|