{
"best_global_step": 350,
"best_metric": 0.10404938459396362,
"best_model_checkpoint": "finetune_data/asd_interpreter_lora/checkpoint-350",
"epoch": 3.0,
"eval_steps": 50,
"global_step": 357,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08421052631578947,
"grad_norm": 1.0511218309402466,
"learning_rate": 0.0001,
"loss": 2.6112,
"mean_token_accuracy": 0.5126021690666676,
"num_tokens": 84675.0,
"step": 10
},
{
"epoch": 0.16842105263157894,
"grad_norm": 0.9096640944480896,
"learning_rate": 0.00019999570594853575,
"loss": 1.2518,
"mean_token_accuracy": 0.6996544972062111,
"num_tokens": 164396.0,
"step": 20
},
{
"epoch": 0.25263157894736843,
"grad_norm": 3.1940813064575195,
"learning_rate": 0.00019948086584424256,
"loss": 0.3253,
"mean_token_accuracy": 0.9035305514931679,
"num_tokens": 241947.0,
"step": 30
},
{
"epoch": 0.3368421052631579,
"grad_norm": 0.4178029000759125,
"learning_rate": 0.0001981122789824607,
"loss": 0.2067,
"mean_token_accuracy": 0.9319008827209473,
"num_tokens": 326149.0,
"step": 40
},
{
"epoch": 0.42105263157894735,
"grad_norm": 0.23984290659427643,
"learning_rate": 0.00019590169060269602,
"loss": 0.1369,
"mean_token_accuracy": 0.9472503453493119,
"num_tokens": 405814.0,
"step": 50
},
{
"epoch": 0.42105263157894735,
"eval_loss": 0.12887074053287506,
"eval_mean_token_accuracy": 0.9501652488341699,
"eval_num_tokens": 405814.0,
"eval_runtime": 3.2571,
"eval_samples_per_second": 30.702,
"eval_steps_per_second": 3.991,
"step": 50
},
{
"epoch": 0.5052631578947369,
"grad_norm": 0.32201144099235535,
"learning_rate": 0.00019286807201812867,
"loss": 0.1211,
"mean_token_accuracy": 0.9524481907486916,
"num_tokens": 484365.0,
"step": 60
},
{
"epoch": 0.5894736842105263,
"grad_norm": 0.1699223816394806,
"learning_rate": 0.00018903745780342839,
"loss": 0.1194,
"mean_token_accuracy": 0.9521158829331398,
"num_tokens": 568191.0,
"step": 70
},
{
"epoch": 0.6736842105263158,
"grad_norm": 0.11287683993577957,
"learning_rate": 0.0001844427223655199,
"loss": 0.1133,
"mean_token_accuracy": 0.9534999072551728,
"num_tokens": 647422.0,
"step": 80
},
{
"epoch": 0.7578947368421053,
"grad_norm": 0.16161221265792847,
"learning_rate": 0.00017912329781477287,
"loss": 0.1111,
"mean_token_accuracy": 0.9529792726039886,
"num_tokens": 726806.0,
"step": 90
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.13746266067028046,
"learning_rate": 0.00017312483555785086,
"loss": 0.111,
"mean_token_accuracy": 0.9529147148132324,
"num_tokens": 809955.0,
"step": 100
},
{
"epoch": 0.8421052631578947,
"eval_loss": 0.11152192950248718,
"eval_mean_token_accuracy": 0.952180761557359,
"eval_num_tokens": 809955.0,
"eval_runtime": 3.3115,
"eval_samples_per_second": 30.198,
"eval_steps_per_second": 3.926,
"step": 100
},
{
"epoch": 0.9263157894736842,
"grad_norm": 0.1132306233048439,
"learning_rate": 0.00016649881451643705,
"loss": 0.109,
"mean_token_accuracy": 0.9546180069446564,
"num_tokens": 888974.0,
"step": 110
},
{
"epoch": 1.0084210526315789,
"grad_norm": 0.15029756724834442,
"learning_rate": 0.00015930209933411036,
"loss": 0.1073,
"mean_token_accuracy": 0.954869093039097,
"num_tokens": 965339.0,
"step": 120
},
{
"epoch": 1.0926315789473684,
"grad_norm": 0.09324247390031815,
"learning_rate": 0.0001515964523628501,
"loss": 0.1058,
"mean_token_accuracy": 0.9544935628771782,
"num_tokens": 1049239.0,
"step": 130
},
{
"epoch": 1.176842105263158,
"grad_norm": 0.09921601414680481,
"learning_rate": 0.00014344800361731027,
"loss": 0.1066,
"mean_token_accuracy": 0.9543229445815087,
"num_tokens": 1128853.0,
"step": 140
},
{
"epoch": 1.2610526315789474,
"grad_norm": 0.14502032101154327,
"learning_rate": 0.00013492668324572614,
"loss": 0.1057,
"mean_token_accuracy": 0.9546637967228889,
"num_tokens": 1207359.0,
"step": 150
},
{
"epoch": 1.2610526315789474,
"eval_loss": 0.10642929375171661,
"eval_mean_token_accuracy": 0.9541201224693885,
"eval_num_tokens": 1207359.0,
"eval_runtime": 3.3561,
"eval_samples_per_second": 29.796,
"eval_steps_per_second": 3.873,
"step": 150
},
{
"epoch": 1.345263157894737,
"grad_norm": 0.07445153594017029,
"learning_rate": 0.00012610562138799978,
"loss": 0.1054,
"mean_token_accuracy": 0.9534011006355285,
"num_tokens": 1291078.0,
"step": 160
},
{
"epoch": 1.4294736842105262,
"grad_norm": 0.0736021175980568,
"learning_rate": 0.00011706052057139335,
"loss": 0.1042,
"mean_token_accuracy": 0.9556787937879563,
"num_tokens": 1370250.0,
"step": 170
},
{
"epoch": 1.5136842105263157,
"grad_norm": 0.10876885801553726,
"learning_rate": 0.00010786900602994359,
"loss": 0.1048,
"mean_token_accuracy": 0.9544201374053956,
"num_tokens": 1449612.0,
"step": 180
},
{
"epoch": 1.5978947368421053,
"grad_norm": 0.10258373618125916,
"learning_rate": 9.860995952316851e-05,
"loss": 0.1035,
"mean_token_accuracy": 0.9549568623304368,
"num_tokens": 1532874.0,
"step": 190
},
{
"epoch": 1.6821052631578948,
"grad_norm": 0.09369785338640213,
"learning_rate": 8.936284237124778e-05,
"loss": 0.1044,
"mean_token_accuracy": 0.9551588267087936,
"num_tokens": 1611823.0,
"step": 200
},
{
"epoch": 1.6821052631578948,
"eval_loss": 0.10525871068239212,
"eval_mean_token_accuracy": 0.9543446852610662,
"eval_num_tokens": 1611823.0,
"eval_runtime": 3.4604,
"eval_samples_per_second": 28.899,
"eval_steps_per_second": 3.757,
"step": 200
},
{
"epoch": 1.7663157894736843,
"grad_norm": 0.09880904853343964,
"learning_rate": 8.020701351640182e-05,
"loss": 0.1045,
"mean_token_accuracy": 0.9547461196780205,
"num_tokens": 1692230.0,
"step": 210
},
{
"epoch": 1.8505263157894736,
"grad_norm": 0.07497906684875488,
"learning_rate": 7.122104846288064e-05,
"loss": 0.1024,
"mean_token_accuracy": 0.9548232913017273,
"num_tokens": 1774458.0,
"step": 220
},
{
"epoch": 1.9347368421052633,
"grad_norm": 0.09438126534223557,
"learning_rate": 6.248206494043313e-05,
"loss": 0.104,
"mean_token_accuracy": 0.9549880161881447,
"num_tokens": 1853023.0,
"step": 230
},
{
"epoch": 2.0168421052631578,
"grad_norm": 0.09313005208969116,
"learning_rate": 5.4065061078425315e-05,
"loss": 0.103,
"mean_token_accuracy": 0.9549200183305985,
"num_tokens": 1930647.0,
"step": 240
},
{
"epoch": 2.1010526315789475,
"grad_norm": 0.07283347845077515,
"learning_rate": 4.604227177041156e-05,
"loss": 0.1019,
"mean_token_accuracy": 0.9548763617873192,
"num_tokens": 2014768.0,
"step": 250
},
{
"epoch": 2.1010526315789475,
"eval_loss": 0.10490619391202927,
"eval_mean_token_accuracy": 0.9543710763637836,
"eval_num_tokens": 2014768.0,
"eval_runtime": 3.4547,
"eval_samples_per_second": 28.946,
"eval_steps_per_second": 3.763,
"step": 250
},
{
"epoch": 2.185263157894737,
"grad_norm": 0.0957074984908104,
"learning_rate": 3.848254875285e-05,
"loss": 0.103,
"mean_token_accuracy": 0.955729816854,
"num_tokens": 2094236.0,
"step": 260
},
{
"epoch": 2.269473684210526,
"grad_norm": 0.08435027301311493,
"learning_rate": 3.145076971813891e-05,
"loss": 0.1027,
"mean_token_accuracy": 0.9557207286357879,
"num_tokens": 2173722.0,
"step": 270
},
{
"epoch": 2.353684210526316,
"grad_norm": 0.07077847421169281,
"learning_rate": 2.500728153297788e-05,
"loss": 0.102,
"mean_token_accuracy": 0.9550597742199898,
"num_tokens": 2256619.0,
"step": 280
},
{
"epoch": 2.437894736842105,
"grad_norm": 0.07486104220151901,
"learning_rate": 1.9207382340364634e-05,
"loss": 0.1024,
"mean_token_accuracy": 0.9555209368467331,
"num_tokens": 2335617.0,
"step": 290
},
{
"epoch": 2.522105263157895,
"grad_norm": 0.06978271156549454,
"learning_rate": 1.41008469898387e-05,
"loss": 0.1031,
"mean_token_accuracy": 0.9553255036473274,
"num_tokens": 2415889.0,
"step": 300
},
{
"epoch": 2.522105263157895,
"eval_loss": 0.10419522225856781,
"eval_mean_token_accuracy": 0.9541570269144498,
"eval_num_tokens": 2415889.0,
"eval_runtime": 3.5101,
"eval_samples_per_second": 28.489,
"eval_steps_per_second": 3.704,
"step": 300
},
{
"epoch": 2.606315789473684,
"grad_norm": 0.07363743335008621,
"learning_rate": 9.731499868738447e-06,
"loss": 0.1016,
"mean_token_accuracy": 0.9559778571128845,
"num_tokens": 2498011.0,
"step": 310
},
{
"epoch": 2.690526315789474,
"grad_norm": 0.07047491520643234,
"learning_rate": 6.136838800442457e-06,
"loss": 0.1026,
"mean_token_accuracy": 0.9554199263453483,
"num_tokens": 2576551.0,
"step": 320
},
{
"epoch": 2.774736842105263,
"grad_norm": 0.07501808553934097,
"learning_rate": 3.3477132373081254e-06,
"loss": 0.1014,
"mean_token_accuracy": 0.9564116463065148,
"num_tokens": 2657833.0,
"step": 330
},
{
"epoch": 2.8589473684210525,
"grad_norm": 0.07335389405488968,
"learning_rate": 1.3880595100613792e-06,
"loss": 0.1023,
"mean_token_accuracy": 0.9557045891880989,
"num_tokens": 2739348.0,
"step": 340
},
{
"epoch": 2.943157894736842,
"grad_norm": 0.06935805082321167,
"learning_rate": 2.746954057333606e-07,
"loss": 0.102,
"mean_token_accuracy": 0.956343337893486,
"num_tokens": 2817342.0,
"step": 350
},
{
"epoch": 2.943157894736842,
"eval_loss": 0.10404938459396362,
"eval_mean_token_accuracy": 0.9544462653306814,
"eval_num_tokens": 2817342.0,
"eval_runtime": 3.5152,
"eval_samples_per_second": 28.448,
"eval_steps_per_second": 3.698,
"step": 350
}
],
"logging_steps": 10,
"max_steps": 357,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2253434919571866e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}