FRRouting-Qwen2.5-14B / checkpoint-115 / trainer_state.json
LayerEight's picture
Upload folder using huggingface_hub
4f7b5b5 verified
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 115,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"entropy": 1.5443342211842537,
"epoch": 0.2176278563656148,
"grad_norm": 0.055419921875,
"learning_rate": 0.00018440244840299506,
"loss": 1.5704032897949218,
"mean_token_accuracy": 0.6370699141919612,
"num_tokens": 160909.0,
"step": 25
},
{
"entropy": 1.1008353152126074,
"epoch": 0.4352557127312296,
"grad_norm": 0.109375,
"learning_rate": 0.00012928227712765504,
"loss": 1.0481930541992188,
"mean_token_accuracy": 0.7331653463840485,
"num_tokens": 326278.0,
"step": 50
},
{
"entropy": 0.6921453317627311,
"epoch": 0.6528835690968444,
"grad_norm": 0.13671875,
"learning_rate": 6.010346486845837e-05,
"loss": 0.6511263275146484,
"mean_token_accuracy": 0.8276160681247711,
"num_tokens": 488512.0,
"step": 75
},
{
"entropy": 0.46901930367574096,
"epoch": 0.8705114254624592,
"grad_norm": 0.12451171875,
"learning_rate": 1.0079278510416313e-05,
"loss": 0.4208272171020508,
"mean_token_accuracy": 0.8860271820425987,
"num_tokens": 651559.0,
"step": 100
}
],
"logging_steps": 25,
"max_steps": 115,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.330634898384486e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}