{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9994372537985369,
"eval_steps": 500,
"global_step": 444,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022509848058525603,
"grad_norm": 4.327241897583008,
"learning_rate": 2.222222222222222e-06,
"loss": 8.5214,
"step": 10
},
{
"epoch": 0.04501969611705121,
"grad_norm": 1.7264961004257202,
"learning_rate": 4.444444444444444e-06,
"loss": 7.5546,
"step": 20
},
{
"epoch": 0.06752954417557681,
"grad_norm": 1.0628855228424072,
"learning_rate": 6.666666666666667e-06,
"loss": 5.7526,
"step": 30
},
{
"epoch": 0.09003939223410241,
"grad_norm": 0.6295533776283264,
"learning_rate": 8.888888888888888e-06,
"loss": 4.9589,
"step": 40
},
{
"epoch": 0.11254924029262803,
"grad_norm": 0.5138620138168335,
"learning_rate": 9.87468671679198e-06,
"loss": 4.6339,
"step": 50
},
{
"epoch": 0.13505908835115363,
"grad_norm": 0.502289891242981,
"learning_rate": 9.62406015037594e-06,
"loss": 4.6855,
"step": 60
},
{
"epoch": 0.15756893640967923,
"grad_norm": 0.3467671871185303,
"learning_rate": 9.3734335839599e-06,
"loss": 4.4222,
"step": 70
},
{
"epoch": 0.18007878446820483,
"grad_norm": 0.35326918959617615,
"learning_rate": 9.12280701754386e-06,
"loss": 4.4481,
"step": 80
},
{
"epoch": 0.20258863252673046,
"grad_norm": 0.3369162976741791,
"learning_rate": 8.87218045112782e-06,
"loss": 4.3478,
"step": 90
},
{
"epoch": 0.22509848058525606,
"grad_norm": 0.3802469074726105,
"learning_rate": 8.62155388471178e-06,
"loss": 4.425,
"step": 100
},
{
"epoch": 0.24760832864378166,
"grad_norm": 0.3173699975013733,
"learning_rate": 8.370927318295739e-06,
"loss": 4.3924,
"step": 110
},
{
"epoch": 0.27011817670230726,
"grad_norm": 0.3183175325393677,
"learning_rate": 8.1203007518797e-06,
"loss": 4.2312,
"step": 120
},
{
"epoch": 0.2926280247608329,
"grad_norm": 0.30415669083595276,
"learning_rate": 7.86967418546366e-06,
"loss": 4.364,
"step": 130
},
{
"epoch": 0.31513787281935846,
"grad_norm": 0.3200981020927429,
"learning_rate": 7.61904761904762e-06,
"loss": 4.2182,
"step": 140
},
{
"epoch": 0.3376477208778841,
"grad_norm": 0.28655463457107544,
"learning_rate": 7.368421052631579e-06,
"loss": 4.2388,
"step": 150
},
{
"epoch": 0.36015756893640966,
"grad_norm": 0.35627660155296326,
"learning_rate": 7.117794486215539e-06,
"loss": 4.2385,
"step": 160
},
{
"epoch": 0.3826674169949353,
"grad_norm": 0.3535982072353363,
"learning_rate": 6.867167919799499e-06,
"loss": 4.256,
"step": 170
},
{
"epoch": 0.4051772650534609,
"grad_norm": 0.29762333631515503,
"learning_rate": 6.616541353383459e-06,
"loss": 4.1519,
"step": 180
},
{
"epoch": 0.4276871131119865,
"grad_norm": 0.29701927304267883,
"learning_rate": 6.365914786967419e-06,
"loss": 4.2534,
"step": 190
},
{
"epoch": 0.4501969611705121,
"grad_norm": 0.3369089961051941,
"learning_rate": 6.115288220551378e-06,
"loss": 4.2689,
"step": 200
},
{
"epoch": 0.4727068092290377,
"grad_norm": 0.28114861249923706,
"learning_rate": 5.864661654135339e-06,
"loss": 4.1793,
"step": 210
},
{
"epoch": 0.4952166572875633,
"grad_norm": 0.29363805055618286,
"learning_rate": 5.6140350877192985e-06,
"loss": 4.0821,
"step": 220
},
{
"epoch": 0.5177265053460889,
"grad_norm": 0.31713083386421204,
"learning_rate": 5.363408521303258e-06,
"loss": 4.1607,
"step": 230
},
{
"epoch": 0.5402363534046145,
"grad_norm": 0.2943900227546692,
"learning_rate": 5.112781954887218e-06,
"loss": 4.0969,
"step": 240
},
{
"epoch": 0.5627462014631401,
"grad_norm": 0.32129648327827454,
"learning_rate": 4.862155388471178e-06,
"loss": 4.2222,
"step": 250
},
{
"epoch": 0.5852560495216658,
"grad_norm": 0.27016839385032654,
"learning_rate": 4.611528822055138e-06,
"loss": 4.1283,
"step": 260
},
{
"epoch": 0.6077658975801913,
"grad_norm": 0.29647159576416016,
"learning_rate": 4.360902255639098e-06,
"loss": 4.0882,
"step": 270
},
{
"epoch": 0.6302757456387169,
"grad_norm": 0.31913793087005615,
"learning_rate": 4.110275689223058e-06,
"loss": 4.2142,
"step": 280
},
{
"epoch": 0.6527855936972425,
"grad_norm": 0.2767369747161865,
"learning_rate": 3.859649122807018e-06,
"loss": 4.1413,
"step": 290
},
{
"epoch": 0.6752954417557682,
"grad_norm": 0.30485114455223083,
"learning_rate": 3.6090225563909775e-06,
"loss": 4.0782,
"step": 300
},
{
"epoch": 0.6978052898142938,
"grad_norm": 0.26790645718574524,
"learning_rate": 3.3583959899749375e-06,
"loss": 4.1322,
"step": 310
},
{
"epoch": 0.7203151378728193,
"grad_norm": 0.29677528142929077,
"learning_rate": 3.107769423558897e-06,
"loss": 4.0987,
"step": 320
},
{
"epoch": 0.7428249859313449,
"grad_norm": 0.26743343472480774,
"learning_rate": 2.8571428571428573e-06,
"loss": 4.0469,
"step": 330
},
{
"epoch": 0.7653348339898706,
"grad_norm": 0.2823913097381592,
"learning_rate": 2.606516290726817e-06,
"loss": 3.977,
"step": 340
},
{
"epoch": 0.7878446820483962,
"grad_norm": 0.3154824674129486,
"learning_rate": 2.355889724310777e-06,
"loss": 4.0915,
"step": 350
},
{
"epoch": 0.8103545301069218,
"grad_norm": 0.27284637093544006,
"learning_rate": 2.105263157894737e-06,
"loss": 4.0195,
"step": 360
},
{
"epoch": 0.8328643781654473,
"grad_norm": 0.27475491166114807,
"learning_rate": 1.8546365914786967e-06,
"loss": 4.0891,
"step": 370
},
{
"epoch": 0.855374226223973,
"grad_norm": 0.27842697501182556,
"learning_rate": 1.6040100250626568e-06,
"loss": 3.9718,
"step": 380
},
{
"epoch": 0.8778840742824986,
"grad_norm": 0.28258708119392395,
"learning_rate": 1.3533834586466167e-06,
"loss": 4.1759,
"step": 390
},
{
"epoch": 0.9003939223410242,
"grad_norm": 0.25824299454689026,
"learning_rate": 1.1027568922305765e-06,
"loss": 4.0275,
"step": 400
},
{
"epoch": 0.9229037703995498,
"grad_norm": 0.269233375787735,
"learning_rate": 8.521303258145364e-07,
"loss": 3.9631,
"step": 410
},
{
"epoch": 0.9454136184580754,
"grad_norm": 0.3044830858707428,
"learning_rate": 6.015037593984962e-07,
"loss": 4.1979,
"step": 420
},
{
"epoch": 0.967923466516601,
"grad_norm": 0.2594878673553467,
"learning_rate": 3.5087719298245616e-07,
"loss": 4.0824,
"step": 430
},
{
"epoch": 0.9904333145751266,
"grad_norm": 0.26378872990608215,
"learning_rate": 1.0025062656641605e-07,
"loss": 3.9954,
"step": 440
}
],
"logging_steps": 10,
"max_steps": 444,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 0,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.625975204144218e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}