{ "best_global_step": 60, "best_metric": 0.7289719626168224, "best_model_checkpoint": "/content/gemma_lora_imb/checkpoint-60", "epoch": 1.1764705882352942, "eval_steps": 20, "global_step": 60, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.39215686274509803, "grad_norm": 60.42306137084961, "learning_rate": 8.627450980392157e-06, "loss": 1.610392189025879, "step": 20 }, { "epoch": 0.39215686274509803, "eval_accuracy": 0.6305418719211823, "eval_f1": 0.6781115879828327, "eval_loss": 1.1058138608932495, "eval_runtime": 10.907, "eval_samples_per_second": 18.612, "eval_steps_per_second": 0.642, "step": 20 }, { "epoch": 0.7843137254901961, "grad_norm": 29.146869659423828, "learning_rate": 6.666666666666667e-06, "loss": 0.8310173988342285, "step": 40 }, { "epoch": 0.7843137254901961, "eval_accuracy": 0.6896551724137931, "eval_f1": 0.7149321266968326, "eval_loss": 0.8462706208229065, "eval_runtime": 10.8418, "eval_samples_per_second": 18.724, "eval_steps_per_second": 0.646, "step": 40 }, { "epoch": 1.1764705882352942, "grad_norm": 52.65027618408203, "learning_rate": 4.705882352941177e-06, "loss": 0.5961102485656739, "step": 60 }, { "epoch": 1.1764705882352942, "eval_accuracy": 0.7142857142857143, "eval_f1": 0.7289719626168224, "eval_loss": 0.8570315837860107, "eval_runtime": 10.8268, "eval_samples_per_second": 18.75, "eval_steps_per_second": 0.647, "step": 60 } ], "logging_steps": 20, "max_steps": 102, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 20, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 3346006641030144.0, "train_batch_size": 32, "trial_name": null, "trial_params": null }