{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.8188331627430911,
"eval_steps": 500,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04094165813715456,
"grad_norm": 7.529000759124756,
"learning_rate": 9.000000000000001e-07,
"loss": 6.639524841308594,
"step": 10
},
{
"epoch": 0.08188331627430911,
"grad_norm": 7.1427388191223145,
"learning_rate": 1.9000000000000002e-06,
"loss": 6.582770538330078,
"step": 20
},
{
"epoch": 0.12282497441146366,
"grad_norm": 6.251121997833252,
"learning_rate": 2.9e-06,
"loss": 6.114261245727539,
"step": 30
},
{
"epoch": 0.16376663254861823,
"grad_norm": 7.014133453369141,
"learning_rate": 3.900000000000001e-06,
"loss": 6.333962631225586,
"step": 40
},
{
"epoch": 0.2047082906857728,
"grad_norm": 7.072969913482666,
"learning_rate": 4.9000000000000005e-06,
"loss": 6.248543930053711,
"step": 50
},
{
"epoch": 0.24564994882292732,
"grad_norm": 5.674424171447754,
"learning_rate": 5.9e-06,
"loss": 5.6220745086669925,
"step": 60
},
{
"epoch": 0.2865916069600819,
"grad_norm": 5.3739800453186035,
"learning_rate": 6.9e-06,
"loss": 5.0251930236816404,
"step": 70
},
{
"epoch": 0.32753326509723646,
"grad_norm": 6.372293472290039,
"learning_rate": 7.9e-06,
"loss": 5.033017349243164,
"step": 80
},
{
"epoch": 0.368474923234391,
"grad_norm": 7.22106409072876,
"learning_rate": 8.900000000000001e-06,
"loss": 3.514456939697266,
"step": 90
},
{
"epoch": 0.4094165813715456,
"grad_norm": 4.990958213806152,
"learning_rate": 9.9e-06,
"loss": 2.256962776184082,
"step": 100
},
{
"epoch": 0.4503582395087001,
"grad_norm": 3.2661993503570557,
"learning_rate": 9.76923076923077e-06,
"loss": 2.0482555389404298,
"step": 110
},
{
"epoch": 0.49129989764585463,
"grad_norm": 2.997323513031006,
"learning_rate": 9.512820512820514e-06,
"loss": 1.6077747344970703,
"step": 120
},
{
"epoch": 0.5322415557830092,
"grad_norm": 2.422741413116455,
"learning_rate": 9.256410256410257e-06,
"loss": 1.538028621673584,
"step": 130
},
{
"epoch": 0.5731832139201638,
"grad_norm": 2.4594290256500244,
"learning_rate": 9e-06,
"loss": 1.5614049911499024,
"step": 140
},
{
"epoch": 0.6141248720573184,
"grad_norm": 2.7256577014923096,
"learning_rate": 8.743589743589743e-06,
"loss": 1.7180768966674804,
"step": 150
},
{
"epoch": 0.6550665301944729,
"grad_norm": 2.6614902019500732,
"learning_rate": 8.487179487179488e-06,
"loss": 1.6412044525146485,
"step": 160
},
{
"epoch": 0.6960081883316275,
"grad_norm": 2.1517934799194336,
"learning_rate": 8.230769230769232e-06,
"loss": 1.729467010498047,
"step": 170
},
{
"epoch": 0.736949846468782,
"grad_norm": 2.4588229656219482,
"learning_rate": 7.974358974358975e-06,
"loss": 1.305363941192627,
"step": 180
},
{
"epoch": 0.7778915046059366,
"grad_norm": 2.5282061100006104,
"learning_rate": 7.717948717948718e-06,
"loss": 1.4986873626708985,
"step": 190
},
{
"epoch": 0.8188331627430911,
"grad_norm": 3.699396848678589,
"learning_rate": 7.461538461538462e-06,
"loss": 1.5059691429138184,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 490,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5850613446242304.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}