{
"best_global_step": 9530,
"best_metric": 0.6577488780021667,
"best_model_checkpoint": "./finbert_weighted_final/checkpoint-9530",
"epoch": 2.0,
"eval_steps": 500,
"global_step": 19060,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01049317943336831,
"grad_norm": 4.134560585021973,
"learning_rate": 1.9896117523609656e-05,
"loss": 0.7054,
"step": 100
},
{
"epoch": 0.02098635886673662,
"grad_norm": 3.1322972774505615,
"learning_rate": 1.979118572927597e-05,
"loss": 0.7018,
"step": 200
},
{
"epoch": 0.03147953830010493,
"grad_norm": 1.025286316871643,
"learning_rate": 1.968625393494229e-05,
"loss": 0.6951,
"step": 300
},
{
"epoch": 0.04197271773347324,
"grad_norm": 2.2497143745422363,
"learning_rate": 1.9581322140608606e-05,
"loss": 0.6978,
"step": 400
},
{
"epoch": 0.05246589716684155,
"grad_norm": 1.647702932357788,
"learning_rate": 1.9476390346274924e-05,
"loss": 0.6892,
"step": 500
},
{
"epoch": 0.06295907660020986,
"grad_norm": 3.1609485149383545,
"learning_rate": 1.937145855194124e-05,
"loss": 0.6925,
"step": 600
},
{
"epoch": 0.07345225603357818,
"grad_norm": 1.7412222623825073,
"learning_rate": 1.9266526757607556e-05,
"loss": 0.6903,
"step": 700
},
{
"epoch": 0.08394543546694648,
"grad_norm": 2.2502248287200928,
"learning_rate": 1.9161594963273874e-05,
"loss": 0.6942,
"step": 800
},
{
"epoch": 0.0944386149003148,
"grad_norm": 2.354647636413574,
"learning_rate": 1.905666316894019e-05,
"loss": 0.6878,
"step": 900
},
{
"epoch": 0.1049317943336831,
"grad_norm": 4.833475589752197,
"learning_rate": 1.895173137460651e-05,
"loss": 0.6893,
"step": 1000
},
{
"epoch": 0.11542497376705142,
"grad_norm": 2.3594470024108887,
"learning_rate": 1.8846799580272824e-05,
"loss": 0.6848,
"step": 1100
},
{
"epoch": 0.1259181532004197,
"grad_norm": 2.3646111488342285,
"learning_rate": 1.874186778593914e-05,
"loss": 0.6885,
"step": 1200
},
{
"epoch": 0.13641133263378805,
"grad_norm": 1.8924506902694702,
"learning_rate": 1.863693599160546e-05,
"loss": 0.6828,
"step": 1300
},
{
"epoch": 0.14690451206715635,
"grad_norm": 1.7312902212142944,
"learning_rate": 1.8532004197271777e-05,
"loss": 0.6885,
"step": 1400
},
{
"epoch": 0.15739769150052466,
"grad_norm": 2.434694528579712,
"learning_rate": 1.842707240293809e-05,
"loss": 0.6929,
"step": 1500
},
{
"epoch": 0.16789087093389296,
"grad_norm": 2.263343572616577,
"learning_rate": 1.832214060860441e-05,
"loss": 0.6828,
"step": 1600
},
{
"epoch": 0.1783840503672613,
"grad_norm": 1.6868016719818115,
"learning_rate": 1.8217208814270727e-05,
"loss": 0.6706,
"step": 1700
},
{
"epoch": 0.1888772298006296,
"grad_norm": 3.530144691467285,
"learning_rate": 1.8112277019937045e-05,
"loss": 0.6833,
"step": 1800
},
{
"epoch": 0.1993704092339979,
"grad_norm": 5.920458793640137,
"learning_rate": 1.800734522560336e-05,
"loss": 0.682,
"step": 1900
},
{
"epoch": 0.2098635886673662,
"grad_norm": 1.9053469896316528,
"learning_rate": 1.7902413431269677e-05,
"loss": 0.6747,
"step": 2000
},
{
"epoch": 0.2203567681007345,
"grad_norm": 1.9532723426818848,
"learning_rate": 1.7797481636935994e-05,
"loss": 0.6695,
"step": 2100
},
{
"epoch": 0.23084994753410285,
"grad_norm": 1.5969197750091553,
"learning_rate": 1.769254984260231e-05,
"loss": 0.6836,
"step": 2200
},
{
"epoch": 0.24134312696747115,
"grad_norm": 2.2583141326904297,
"learning_rate": 1.7587618048268627e-05,
"loss": 0.6748,
"step": 2300
},
{
"epoch": 0.2518363064008394,
"grad_norm": 6.070931911468506,
"learning_rate": 1.7482686253934944e-05,
"loss": 0.682,
"step": 2400
},
{
"epoch": 0.2623294858342078,
"grad_norm": 4.128929138183594,
"learning_rate": 1.7377754459601262e-05,
"loss": 0.6849,
"step": 2500
},
{
"epoch": 0.2728226652675761,
"grad_norm": 5.5497565269470215,
"learning_rate": 1.7272822665267576e-05,
"loss": 0.6805,
"step": 2600
},
{
"epoch": 0.2833158447009444,
"grad_norm": 4.123847961425781,
"learning_rate": 1.7167890870933894e-05,
"loss": 0.6876,
"step": 2700
},
{
"epoch": 0.2938090241343127,
"grad_norm": 2.535865068435669,
"learning_rate": 1.7062959076600212e-05,
"loss": 0.6794,
"step": 2800
},
{
"epoch": 0.304302203567681,
"grad_norm": 2.873096466064453,
"learning_rate": 1.6958027282266526e-05,
"loss": 0.6625,
"step": 2900
},
{
"epoch": 0.3147953830010493,
"grad_norm": 2.8748557567596436,
"learning_rate": 1.6853095487932844e-05,
"loss": 0.6791,
"step": 3000
},
{
"epoch": 0.3252885624344176,
"grad_norm": 5.24050235748291,
"learning_rate": 1.6748163693599162e-05,
"loss": 0.6848,
"step": 3100
},
{
"epoch": 0.3357817418677859,
"grad_norm": 2.885096311569214,
"learning_rate": 1.664323189926548e-05,
"loss": 0.6849,
"step": 3200
},
{
"epoch": 0.3462749213011542,
"grad_norm": 3.2616255283355713,
"learning_rate": 1.6538300104931794e-05,
"loss": 0.6751,
"step": 3300
},
{
"epoch": 0.3567681007345226,
"grad_norm": 4.990833282470703,
"learning_rate": 1.643336831059811e-05,
"loss": 0.6715,
"step": 3400
},
{
"epoch": 0.3672612801678909,
"grad_norm": 4.264138221740723,
"learning_rate": 1.632843651626443e-05,
"loss": 0.6831,
"step": 3500
},
{
"epoch": 0.3777544596012592,
"grad_norm": 3.7659502029418945,
"learning_rate": 1.6223504721930747e-05,
"loss": 0.6794,
"step": 3600
},
{
"epoch": 0.3882476390346275,
"grad_norm": 3.0774195194244385,
"learning_rate": 1.611857292759706e-05,
"loss": 0.6787,
"step": 3700
},
{
"epoch": 0.3987408184679958,
"grad_norm": 4.424105167388916,
"learning_rate": 1.601364113326338e-05,
"loss": 0.6606,
"step": 3800
},
{
"epoch": 0.4092339979013641,
"grad_norm": 5.348263263702393,
"learning_rate": 1.5908709338929697e-05,
"loss": 0.6757,
"step": 3900
},
{
"epoch": 0.4197271773347324,
"grad_norm": 13.849942207336426,
"learning_rate": 1.580377754459601e-05,
"loss": 0.683,
"step": 4000
},
{
"epoch": 0.4302203567681007,
"grad_norm": 3.790433168411255,
"learning_rate": 1.569884575026233e-05,
"loss": 0.6713,
"step": 4100
},
{
"epoch": 0.440713536201469,
"grad_norm": 3.099285840988159,
"learning_rate": 1.5593913955928647e-05,
"loss": 0.6792,
"step": 4200
},
{
"epoch": 0.45120671563483733,
"grad_norm": 6.984988689422607,
"learning_rate": 1.5488982161594965e-05,
"loss": 0.6628,
"step": 4300
},
{
"epoch": 0.4616998950682057,
"grad_norm": 2.477212905883789,
"learning_rate": 1.538405036726128e-05,
"loss": 0.6701,
"step": 4400
},
{
"epoch": 0.472193074501574,
"grad_norm": 3.1472208499908447,
"learning_rate": 1.5279118572927597e-05,
"loss": 0.6749,
"step": 4500
},
{
"epoch": 0.4826862539349423,
"grad_norm": 3.234646797180176,
"learning_rate": 1.5174186778593916e-05,
"loss": 0.6721,
"step": 4600
},
{
"epoch": 0.4931794333683106,
"grad_norm": 4.641822338104248,
"learning_rate": 1.5069254984260234e-05,
"loss": 0.6657,
"step": 4700
},
{
"epoch": 0.5036726128016789,
"grad_norm": 2.161076545715332,
"learning_rate": 1.4964323189926548e-05,
"loss": 0.6814,
"step": 4800
},
{
"epoch": 0.5141657922350472,
"grad_norm": 3.0307767391204834,
"learning_rate": 1.4859391395592866e-05,
"loss": 0.6657,
"step": 4900
},
{
"epoch": 0.5246589716684156,
"grad_norm": 4.245023727416992,
"learning_rate": 1.4754459601259184e-05,
"loss": 0.6689,
"step": 5000
},
{
"epoch": 0.5351521511017838,
"grad_norm": 1.969188928604126,
"learning_rate": 1.4649527806925498e-05,
"loss": 0.6808,
"step": 5100
},
{
"epoch": 0.5456453305351522,
"grad_norm": 2.4927730560302734,
"learning_rate": 1.4544596012591816e-05,
"loss": 0.6771,
"step": 5200
},
{
"epoch": 0.5561385099685204,
"grad_norm": 2.748563289642334,
"learning_rate": 1.4439664218258134e-05,
"loss": 0.6631,
"step": 5300
},
{
"epoch": 0.5666316894018888,
"grad_norm": 2.897603988647461,
"learning_rate": 1.4334732423924451e-05,
"loss": 0.6673,
"step": 5400
},
{
"epoch": 0.577124868835257,
"grad_norm": 4.7978434562683105,
"learning_rate": 1.4229800629590766e-05,
"loss": 0.6658,
"step": 5500
},
{
"epoch": 0.5876180482686254,
"grad_norm": 2.886988878250122,
"learning_rate": 1.4124868835257084e-05,
"loss": 0.6654,
"step": 5600
},
{
"epoch": 0.5981112277019937,
"grad_norm": 1.771572470664978,
"learning_rate": 1.4019937040923401e-05,
"loss": 0.6699,
"step": 5700
},
{
"epoch": 0.608604407135362,
"grad_norm": 5.75907564163208,
"learning_rate": 1.3915005246589717e-05,
"loss": 0.6611,
"step": 5800
},
{
"epoch": 0.6190975865687304,
"grad_norm": 2.745943307876587,
"learning_rate": 1.3810073452256033e-05,
"loss": 0.6725,
"step": 5900
},
{
"epoch": 0.6295907660020986,
"grad_norm": 5.2041096687316895,
"learning_rate": 1.3705141657922351e-05,
"loss": 0.6726,
"step": 6000
},
{
"epoch": 0.640083945435467,
"grad_norm": 2.5480427742004395,
"learning_rate": 1.3600209863588669e-05,
"loss": 0.6696,
"step": 6100
},
{
"epoch": 0.6505771248688352,
"grad_norm": 3.4199516773223877,
"learning_rate": 1.3495278069254985e-05,
"loss": 0.6661,
"step": 6200
},
{
"epoch": 0.6610703043022036,
"grad_norm": 2.6629273891448975,
"learning_rate": 1.3390346274921303e-05,
"loss": 0.662,
"step": 6300
},
{
"epoch": 0.6715634837355718,
"grad_norm": 3.0862207412719727,
"learning_rate": 1.3285414480587619e-05,
"loss": 0.6717,
"step": 6400
},
{
"epoch": 0.6820566631689402,
"grad_norm": 3.0523600578308105,
"learning_rate": 1.3180482686253937e-05,
"loss": 0.6652,
"step": 6500
},
{
"epoch": 0.6925498426023085,
"grad_norm": 2.254683494567871,
"learning_rate": 1.3075550891920253e-05,
"loss": 0.6719,
"step": 6600
},
{
"epoch": 0.7030430220356768,
"grad_norm": 2.2026445865631104,
"learning_rate": 1.297061909758657e-05,
"loss": 0.6637,
"step": 6700
},
{
"epoch": 0.7135362014690452,
"grad_norm": 1.478987455368042,
"learning_rate": 1.2865687303252886e-05,
"loss": 0.6684,
"step": 6800
},
{
"epoch": 0.7240293809024134,
"grad_norm": 5.10315465927124,
"learning_rate": 1.2760755508919203e-05,
"loss": 0.6715,
"step": 6900
},
{
"epoch": 0.7345225603357818,
"grad_norm": 3.607482433319092,
"learning_rate": 1.265582371458552e-05,
"loss": 0.6831,
"step": 7000
},
{
"epoch": 0.74501573976915,
"grad_norm": 3.3320655822753906,
"learning_rate": 1.2550891920251838e-05,
"loss": 0.6608,
"step": 7100
},
{
"epoch": 0.7555089192025184,
"grad_norm": 2.87164306640625,
"learning_rate": 1.2445960125918156e-05,
"loss": 0.6716,
"step": 7200
},
{
"epoch": 0.7660020986358866,
"grad_norm": 3.3101062774658203,
"learning_rate": 1.234102833158447e-05,
"loss": 0.6709,
"step": 7300
},
{
"epoch": 0.776495278069255,
"grad_norm": 3.6823277473449707,
"learning_rate": 1.2236096537250788e-05,
"loss": 0.6616,
"step": 7400
},
{
"epoch": 0.7869884575026233,
"grad_norm": 5.88408088684082,
"learning_rate": 1.2131164742917106e-05,
"loss": 0.6623,
"step": 7500
},
{
"epoch": 0.7974816369359916,
"grad_norm": 1.64940345287323,
"learning_rate": 1.2026232948583423e-05,
"loss": 0.6784,
"step": 7600
},
{
"epoch": 0.8079748163693599,
"grad_norm": 2.7837324142456055,
"learning_rate": 1.1921301154249738e-05,
"loss": 0.6661,
"step": 7700
},
{
"epoch": 0.8184679958027282,
"grad_norm": 2.218055248260498,
"learning_rate": 1.1816369359916056e-05,
"loss": 0.6651,
"step": 7800
},
{
"epoch": 0.8289611752360966,
"grad_norm": 2.035811185836792,
"learning_rate": 1.1711437565582373e-05,
"loss": 0.6671,
"step": 7900
},
{
"epoch": 0.8394543546694648,
"grad_norm": 2.858041524887085,
"learning_rate": 1.1606505771248688e-05,
"loss": 0.6729,
"step": 8000
},
{
"epoch": 0.8499475341028332,
"grad_norm": 3.871108293533325,
"learning_rate": 1.1501573976915005e-05,
"loss": 0.6607,
"step": 8100
},
{
"epoch": 0.8604407135362014,
"grad_norm": 2.4229085445404053,
"learning_rate": 1.1396642182581323e-05,
"loss": 0.6654,
"step": 8200
},
{
"epoch": 0.8709338929695698,
"grad_norm": 5.086295127868652,
"learning_rate": 1.1291710388247641e-05,
"loss": 0.6782,
"step": 8300
},
{
"epoch": 0.881427072402938,
"grad_norm": 2.7656631469726562,
"learning_rate": 1.1186778593913957e-05,
"loss": 0.6678,
"step": 8400
},
{
"epoch": 0.8919202518363064,
"grad_norm": 4.321835041046143,
"learning_rate": 1.1081846799580273e-05,
"loss": 0.6627,
"step": 8500
},
{
"epoch": 0.9024134312696747,
"grad_norm": 3.4984049797058105,
"learning_rate": 1.097691500524659e-05,
"loss": 0.6616,
"step": 8600
},
{
"epoch": 0.912906610703043,
"grad_norm": 2.042647123336792,
"learning_rate": 1.0871983210912907e-05,
"loss": 0.6488,
"step": 8700
},
{
"epoch": 0.9233997901364114,
"grad_norm": 2.3309738636016846,
"learning_rate": 1.0767051416579225e-05,
"loss": 0.6608,
"step": 8800
},
{
"epoch": 0.9338929695697796,
"grad_norm": 2.525007486343384,
"learning_rate": 1.066211962224554e-05,
"loss": 0.658,
"step": 8900
},
{
"epoch": 0.944386149003148,
"grad_norm": 2.533332347869873,
"learning_rate": 1.0557187827911858e-05,
"loss": 0.6626,
"step": 9000
},
{
"epoch": 0.9548793284365162,
"grad_norm": 2.3524672985076904,
"learning_rate": 1.0452256033578174e-05,
"loss": 0.6698,
"step": 9100
},
{
"epoch": 0.9653725078698846,
"grad_norm": 2.1326098442077637,
"learning_rate": 1.0347324239244492e-05,
"loss": 0.6622,
"step": 9200
},
{
"epoch": 0.9758656873032528,
"grad_norm": 2.8159704208374023,
"learning_rate": 1.024239244491081e-05,
"loss": 0.6686,
"step": 9300
},
{
"epoch": 0.9863588667366212,
"grad_norm": 3.572868824005127,
"learning_rate": 1.0137460650577126e-05,
"loss": 0.6637,
"step": 9400
},
{
"epoch": 0.9968520461699895,
"grad_norm": 2.862517833709717,
"learning_rate": 1.0032528856243442e-05,
"loss": 0.66,
"step": 9500
},
{
"epoch": 1.0,
"eval_accuracy": 0.5864637985309549,
"eval_f1": 0.5137569401603949,
"eval_loss": 0.6577488780021667,
"eval_precision": 0.4279767716737756,
"eval_recall": 0.6425430136563537,
"eval_runtime": 41.917,
"eval_samples_per_second": 909.416,
"eval_steps_per_second": 113.677,
"step": 9530
},
{
"epoch": 1.0073452256033577,
"grad_norm": 4.176674842834473,
"learning_rate": 9.92759706190976e-06,
"loss": 0.6298,
"step": 9600
},
{
"epoch": 1.0178384050367262,
"grad_norm": 5.948734760284424,
"learning_rate": 9.822665267576076e-06,
"loss": 0.6389,
"step": 9700
},
{
"epoch": 1.0283315844700944,
"grad_norm": 2.9435501098632812,
"learning_rate": 9.717733473242394e-06,
"loss": 0.6418,
"step": 9800
},
{
"epoch": 1.0388247639034627,
"grad_norm": 3.332951545715332,
"learning_rate": 9.61280167890871e-06,
"loss": 0.6393,
"step": 9900
},
{
"epoch": 1.0493179433368311,
"grad_norm": 3.7592711448669434,
"learning_rate": 9.507869884575027e-06,
"loss": 0.6334,
"step": 10000
},
{
"epoch": 1.0598111227701994,
"grad_norm": 4.113369464874268,
"learning_rate": 9.402938090241344e-06,
"loss": 0.6324,
"step": 10100
},
{
"epoch": 1.0703043022035676,
"grad_norm": 4.169111251831055,
"learning_rate": 9.298006295907661e-06,
"loss": 0.6195,
"step": 10200
},
{
"epoch": 1.080797481636936,
"grad_norm": 6.074437141418457,
"learning_rate": 9.193074501573977e-06,
"loss": 0.6336,
"step": 10300
},
{
"epoch": 1.0912906610703044,
"grad_norm": 5.635167121887207,
"learning_rate": 9.088142707240295e-06,
"loss": 0.6235,
"step": 10400
},
{
"epoch": 1.1017838405036726,
"grad_norm": 3.1099581718444824,
"learning_rate": 8.983210912906611e-06,
"loss": 0.6332,
"step": 10500
},
{
"epoch": 1.1122770199370409,
"grad_norm": 3.938689708709717,
"learning_rate": 8.878279118572927e-06,
"loss": 0.6337,
"step": 10600
},
{
"epoch": 1.1227701993704091,
"grad_norm": 3.1300745010375977,
"learning_rate": 8.773347324239245e-06,
"loss": 0.626,
"step": 10700
},
{
"epoch": 1.1332633788037776,
"grad_norm": 4.025566101074219,
"learning_rate": 8.668415529905561e-06,
"loss": 0.6316,
"step": 10800
},
{
"epoch": 1.1437565582371458,
"grad_norm": 2.0796873569488525,
"learning_rate": 8.563483735571879e-06,
"loss": 0.6445,
"step": 10900
},
{
"epoch": 1.154249737670514,
"grad_norm": 6.824429512023926,
"learning_rate": 8.458551941238195e-06,
"loss": 0.6291,
"step": 11000
},
{
"epoch": 1.1647429171038826,
"grad_norm": 3.644653797149658,
"learning_rate": 8.353620146904513e-06,
"loss": 0.6391,
"step": 11100
},
{
"epoch": 1.1752360965372508,
"grad_norm": 3.249530792236328,
"learning_rate": 8.248688352570829e-06,
"loss": 0.645,
"step": 11200
},
{
"epoch": 1.185729275970619,
"grad_norm": 2.6394639015197754,
"learning_rate": 8.143756558237146e-06,
"loss": 0.6523,
"step": 11300
},
{
"epoch": 1.1962224554039875,
"grad_norm": 4.585180282592773,
"learning_rate": 8.038824763903464e-06,
"loss": 0.6355,
"step": 11400
},
{
"epoch": 1.2067156348373558,
"grad_norm": 3.901735782623291,
"learning_rate": 7.93389296956978e-06,
"loss": 0.646,
"step": 11500
},
{
"epoch": 1.217208814270724,
"grad_norm": 3.5547521114349365,
"learning_rate": 7.828961175236098e-06,
"loss": 0.6289,
"step": 11600
},
{
"epoch": 1.2277019937040923,
"grad_norm": 3.6690990924835205,
"learning_rate": 7.724029380902414e-06,
"loss": 0.6365,
"step": 11700
},
{
"epoch": 1.2381951731374607,
"grad_norm": 3.4228599071502686,
"learning_rate": 7.619097586568731e-06,
"loss": 0.6313,
"step": 11800
},
{
"epoch": 1.248688352570829,
"grad_norm": 5.294908046722412,
"learning_rate": 7.514165792235048e-06,
"loss": 0.6206,
"step": 11900
},
{
"epoch": 1.2591815320041972,
"grad_norm": 4.170722007751465,
"learning_rate": 7.409233997901365e-06,
"loss": 0.6131,
"step": 12000
},
{
"epoch": 1.2696747114375655,
"grad_norm": 4.095099925994873,
"learning_rate": 7.304302203567682e-06,
"loss": 0.6353,
"step": 12100
},
{
"epoch": 1.280167890870934,
"grad_norm": 3.773933172225952,
"learning_rate": 7.1993704092339985e-06,
"loss": 0.6431,
"step": 12200
},
{
"epoch": 1.2906610703043022,
"grad_norm": 4.165128707885742,
"learning_rate": 7.0944386149003154e-06,
"loss": 0.6279,
"step": 12300
},
{
"epoch": 1.3011542497376705,
"grad_norm": 5.304838180541992,
"learning_rate": 6.989506820566632e-06,
"loss": 0.6168,
"step": 12400
},
{
"epoch": 1.311647429171039,
"grad_norm": 4.008569717407227,
"learning_rate": 6.884575026232949e-06,
"loss": 0.6162,
"step": 12500
},
{
"epoch": 1.3221406086044072,
"grad_norm": 7.424976348876953,
"learning_rate": 6.779643231899265e-06,
"loss": 0.6382,
"step": 12600
},
{
"epoch": 1.3326337880377754,
"grad_norm": 3.9119601249694824,
"learning_rate": 6.674711437565583e-06,
"loss": 0.6359,
"step": 12700
},
{
"epoch": 1.3431269674711437,
"grad_norm": 2.6989879608154297,
"learning_rate": 6.569779643231899e-06,
"loss": 0.6365,
"step": 12800
},
{
"epoch": 1.353620146904512,
"grad_norm": 3.548513650894165,
"learning_rate": 6.464847848898217e-06,
"loss": 0.6291,
"step": 12900
},
{
"epoch": 1.3641133263378804,
"grad_norm": 4.189005374908447,
"learning_rate": 6.359916054564533e-06,
"loss": 0.6171,
"step": 13000
},
{
"epoch": 1.3746065057712487,
"grad_norm": 4.647156715393066,
"learning_rate": 6.254984260230851e-06,
"loss": 0.6121,
"step": 13100
},
{
"epoch": 1.385099685204617,
"grad_norm": 6.423859119415283,
"learning_rate": 6.150052465897168e-06,
"loss": 0.6229,
"step": 13200
},
{
"epoch": 1.3955928646379854,
"grad_norm": 3.7313272953033447,
"learning_rate": 6.0451206715634845e-06,
"loss": 0.6193,
"step": 13300
},
{
"epoch": 1.4060860440713536,
"grad_norm": 4.396394729614258,
"learning_rate": 5.940188877229801e-06,
"loss": 0.6303,
"step": 13400
},
{
"epoch": 1.4165792235047219,
"grad_norm": 4.366927146911621,
"learning_rate": 5.8352570828961175e-06,
"loss": 0.6349,
"step": 13500
},
{
"epoch": 1.4270724029380903,
"grad_norm": 3.679366111755371,
"learning_rate": 5.730325288562435e-06,
"loss": 0.6188,
"step": 13600
},
{
"epoch": 1.4375655823714586,
"grad_norm": 6.429540157318115,
"learning_rate": 5.625393494228751e-06,
"loss": 0.6161,
"step": 13700
},
{
"epoch": 1.4480587618048268,
"grad_norm": 4.448658466339111,
"learning_rate": 5.520461699895069e-06,
"loss": 0.6179,
"step": 13800
},
{
"epoch": 1.458551941238195,
"grad_norm": 5.556158542633057,
"learning_rate": 5.415529905561385e-06,
"loss": 0.6176,
"step": 13900
},
{
"epoch": 1.4690451206715636,
"grad_norm": 6.288188457489014,
"learning_rate": 5.310598111227703e-06,
"loss": 0.6243,
"step": 14000
},
{
"epoch": 1.4795383001049318,
"grad_norm": 4.509946823120117,
"learning_rate": 5.205666316894019e-06,
"loss": 0.6395,
"step": 14100
},
{
"epoch": 1.4900314795383,
"grad_norm": 2.5123424530029297,
"learning_rate": 5.100734522560337e-06,
"loss": 0.642,
"step": 14200
},
{
"epoch": 1.5005246589716683,
"grad_norm": 5.663776397705078,
"learning_rate": 4.995802728226653e-06,
"loss": 0.6174,
"step": 14300
},
{
"epoch": 1.5110178384050368,
"grad_norm": 3.452349901199341,
"learning_rate": 4.89087093389297e-06,
"loss": 0.6026,
"step": 14400
},
{
"epoch": 1.521511017838405,
"grad_norm": 3.6340229511260986,
"learning_rate": 4.7859391395592865e-06,
"loss": 0.6163,
"step": 14500
},
{
"epoch": 1.5320041972717733,
"grad_norm": 3.506894588470459,
"learning_rate": 4.6810073452256034e-06,
"loss": 0.6287,
"step": 14600
},
{
"epoch": 1.5424973767051418,
"grad_norm": 4.924068450927734,
"learning_rate": 4.576075550891921e-06,
"loss": 0.6254,
"step": 14700
},
{
"epoch": 1.55299055613851,
"grad_norm": 6.727036476135254,
"learning_rate": 4.471143756558238e-06,
"loss": 0.6187,
"step": 14800
},
{
"epoch": 1.5634837355718783,
"grad_norm": 4.4192399978637695,
"learning_rate": 4.366211962224554e-06,
"loss": 0.6308,
"step": 14900
},
{
"epoch": 1.5739769150052467,
"grad_norm": 3.18361496925354,
"learning_rate": 4.261280167890871e-06,
"loss": 0.6423,
"step": 15000
},
{
"epoch": 1.5844700944386148,
"grad_norm": 3.8575243949890137,
"learning_rate": 4.156348373557188e-06,
"loss": 0.6202,
"step": 15100
},
{
"epoch": 1.5949632738719832,
"grad_norm": 5.774935722351074,
"learning_rate": 4.051416579223505e-06,
"loss": 0.6226,
"step": 15200
},
{
"epoch": 1.6054564533053515,
"grad_norm": 5.742249488830566,
"learning_rate": 3.946484784889822e-06,
"loss": 0.6291,
"step": 15300
},
{
"epoch": 1.6159496327387197,
"grad_norm": 4.477142810821533,
"learning_rate": 3.841552990556139e-06,
"loss": 0.627,
"step": 15400
},
{
"epoch": 1.6264428121720882,
"grad_norm": 4.507772922515869,
"learning_rate": 3.7366211962224556e-06,
"loss": 0.6309,
"step": 15500
},
{
"epoch": 1.6369359916054564,
"grad_norm": 4.460930347442627,
"learning_rate": 3.631689401888773e-06,
"loss": 0.6242,
"step": 15600
},
{
"epoch": 1.6474291710388247,
"grad_norm": 4.099055290222168,
"learning_rate": 3.52675760755509e-06,
"loss": 0.6242,
"step": 15700
},
{
"epoch": 1.6579223504721932,
"grad_norm": 5.501063346862793,
"learning_rate": 3.4218258132214067e-06,
"loss": 0.6207,
"step": 15800
},
{
"epoch": 1.6684155299055614,
"grad_norm": 5.495902061462402,
"learning_rate": 3.316894018887723e-06,
"loss": 0.6197,
"step": 15900
},
{
"epoch": 1.6789087093389297,
"grad_norm": 5.21685266494751,
"learning_rate": 3.21196222455404e-06,
"loss": 0.6275,
"step": 16000
},
{
"epoch": 1.6894018887722981,
"grad_norm": 3.404468536376953,
"learning_rate": 3.107030430220357e-06,
"loss": 0.6333,
"step": 16100
},
{
"epoch": 1.6998950682056662,
"grad_norm": 5.716117858886719,
"learning_rate": 3.002098635886674e-06,
"loss": 0.6244,
"step": 16200
},
{
"epoch": 1.7103882476390346,
"grad_norm": 4.773796081542969,
"learning_rate": 2.897166841552991e-06,
"loss": 0.6203,
"step": 16300
},
{
"epoch": 1.720881427072403,
"grad_norm": 4.482935905456543,
"learning_rate": 2.7922350472193077e-06,
"loss": 0.6143,
"step": 16400
},
{
"epoch": 1.7313746065057711,
"grad_norm": 4.8394341468811035,
"learning_rate": 2.6873032528856246e-06,
"loss": 0.6325,
"step": 16500
},
{
"epoch": 1.7418677859391396,
"grad_norm": 5.177644729614258,
"learning_rate": 2.5823714585519415e-06,
"loss": 0.6093,
"step": 16600
},
{
"epoch": 1.7523609653725079,
"grad_norm": 5.235119342803955,
"learning_rate": 2.4774396642182585e-06,
"loss": 0.6218,
"step": 16700
},
{
"epoch": 1.762854144805876,
"grad_norm": 4.899617671966553,
"learning_rate": 2.3725078698845754e-06,
"loss": 0.6102,
"step": 16800
},
{
"epoch": 1.7733473242392446,
"grad_norm": 6.910520076751709,
"learning_rate": 2.2675760755508923e-06,
"loss": 0.6138,
"step": 16900
},
{
"epoch": 1.7838405036726128,
"grad_norm": 4.011341094970703,
"learning_rate": 2.162644281217209e-06,
"loss": 0.6367,
"step": 17000
},
{
"epoch": 1.794333683105981,
"grad_norm": 4.2056427001953125,
"learning_rate": 2.0577124868835257e-06,
"loss": 0.6297,
"step": 17100
},
{
"epoch": 1.8048268625393495,
"grad_norm": 5.336106777191162,
"learning_rate": 1.9527806925498426e-06,
"loss": 0.6315,
"step": 17200
},
{
"epoch": 1.8153200419727176,
"grad_norm": 5.930999279022217,
"learning_rate": 1.8478488982161597e-06,
"loss": 0.6202,
"step": 17300
},
{
"epoch": 1.825813221406086,
"grad_norm": 3.3669204711914062,
"learning_rate": 1.7429171038824766e-06,
"loss": 0.6275,
"step": 17400
},
{
"epoch": 1.8363064008394545,
"grad_norm": 3.4519994258880615,
"learning_rate": 1.6379853095487935e-06,
"loss": 0.6257,
"step": 17500
},
{
"epoch": 1.8467995802728225,
"grad_norm": 4.662219524383545,
"learning_rate": 1.5330535152151102e-06,
"loss": 0.599,
"step": 17600
},
{
"epoch": 1.857292759706191,
"grad_norm": 5.023181438446045,
"learning_rate": 1.428121720881427e-06,
"loss": 0.6366,
"step": 17700
},
{
"epoch": 1.8677859391395593,
"grad_norm": 7.75839900970459,
"learning_rate": 1.323189926547744e-06,
"loss": 0.6181,
"step": 17800
},
{
"epoch": 1.8782791185729275,
"grad_norm": 3.3768789768218994,
"learning_rate": 1.2182581322140611e-06,
"loss": 0.6293,
"step": 17900
},
{
"epoch": 1.888772298006296,
"grad_norm": 4.408294200897217,
"learning_rate": 1.1133263378803778e-06,
"loss": 0.6053,
"step": 18000
},
{
"epoch": 1.8992654774396642,
"grad_norm": 5.284770488739014,
"learning_rate": 1.0083945435466947e-06,
"loss": 0.6257,
"step": 18100
},
{
"epoch": 1.9097586568730325,
"grad_norm": 3.856238842010498,
"learning_rate": 9.034627492130116e-07,
"loss": 0.6239,
"step": 18200
},
{
"epoch": 1.920251836306401,
"grad_norm": 3.986468553543091,
"learning_rate": 7.985309548793285e-07,
"loss": 0.6058,
"step": 18300
},
{
"epoch": 1.9307450157397692,
"grad_norm": 4.508366584777832,
"learning_rate": 6.935991605456453e-07,
"loss": 0.6168,
"step": 18400
},
{
"epoch": 1.9412381951731374,
"grad_norm": 3.7954189777374268,
"learning_rate": 5.886673662119622e-07,
"loss": 0.6143,
"step": 18500
},
{
"epoch": 1.951731374606506,
"grad_norm": 4.71843957901001,
"learning_rate": 4.837355718782791e-07,
"loss": 0.6153,
"step": 18600
},
{
"epoch": 1.962224554039874,
"grad_norm": 5.512548446655273,
"learning_rate": 3.7880377754459604e-07,
"loss": 0.6101,
"step": 18700
},
{
"epoch": 1.9727177334732424,
"grad_norm": 4.672413349151611,
"learning_rate": 2.7387198321091295e-07,
"loss": 0.6149,
"step": 18800
},
{
"epoch": 1.9832109129066107,
"grad_norm": 3.992551326751709,
"learning_rate": 1.689401888772298e-07,
"loss": 0.6431,
"step": 18900
},
{
"epoch": 1.993704092339979,
"grad_norm": 3.957615375518799,
"learning_rate": 6.40083945435467e-08,
"loss": 0.6107,
"step": 19000
},
{
"epoch": 2.0,
"eval_accuracy": 0.6031217208814271,
"eval_f1": 0.515732530968919,
"eval_loss": 0.6643054485321045,
"eval_precision": 0.44070021881838073,
"eval_recall": 0.6215569786281923,
"eval_runtime": 42.0167,
"eval_samples_per_second": 907.258,
"eval_steps_per_second": 113.407,
"step": 19060
}
],
"logging_steps": 100,
"max_steps": 19060,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.00297934303232e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}