{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.541703559412077,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 54.43408203125,
"learning_rate": 0.0001,
"loss": 6.3724,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 54.14735794067383,
"learning_rate": 0.0002,
"loss": 6.3055,
"step": 2
},
{
"epoch": 0.0,
"grad_norm": 19.2130184173584,
"learning_rate": 0.00019992902767920512,
"loss": 3.739,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 12.525617599487305,
"learning_rate": 0.00019985805535841024,
"loss": 2.6998,
"step": 4
},
{
"epoch": 0.01,
"grad_norm": 10.059170722961426,
"learning_rate": 0.00019978708303761532,
"loss": 2.136,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 6.011496067047119,
"learning_rate": 0.00019971611071682046,
"loss": 1.8854,
"step": 6
},
{
"epoch": 0.01,
"grad_norm": 2.9094812870025635,
"learning_rate": 0.00019964513839602557,
"loss": 1.8689,
"step": 7
},
{
"epoch": 0.01,
"grad_norm": 1.9098069667816162,
"learning_rate": 0.00019957416607523069,
"loss": 1.762,
"step": 8
},
{
"epoch": 0.01,
"grad_norm": 1.3670941591262817,
"learning_rate": 0.00019950319375443577,
"loss": 1.6628,
"step": 9
},
{
"epoch": 0.01,
"grad_norm": 1.1179559230804443,
"learning_rate": 0.00019943222143364088,
"loss": 1.6198,
"step": 10
},
{
"epoch": 0.02,
"grad_norm": 0.896386444568634,
"learning_rate": 0.000199361249112846,
"loss": 1.5714,
"step": 11
},
{
"epoch": 0.02,
"grad_norm": 0.9485528469085693,
"learning_rate": 0.0001992902767920511,
"loss": 1.5582,
"step": 12
},
{
"epoch": 0.02,
"grad_norm": 0.9695132970809937,
"learning_rate": 0.00019921930447125622,
"loss": 1.5308,
"step": 13
},
{
"epoch": 0.02,
"grad_norm": 0.7802467346191406,
"learning_rate": 0.00019914833215046134,
"loss": 1.5607,
"step": 14
},
{
"epoch": 0.02,
"grad_norm": 0.7041746377944946,
"learning_rate": 0.00019907735982966645,
"loss": 1.5013,
"step": 15
},
{
"epoch": 0.02,
"grad_norm": 0.6526293158531189,
"learning_rate": 0.00019900638750887156,
"loss": 1.4903,
"step": 16
},
{
"epoch": 0.02,
"grad_norm": 0.7452432513237,
"learning_rate": 0.00019893541518807665,
"loss": 1.495,
"step": 17
},
{
"epoch": 0.03,
"grad_norm": 0.642892062664032,
"learning_rate": 0.00019886444286728176,
"loss": 1.3996,
"step": 18
},
{
"epoch": 0.03,
"grad_norm": 0.608568012714386,
"learning_rate": 0.00019879347054648687,
"loss": 1.4793,
"step": 19
},
{
"epoch": 0.03,
"grad_norm": 0.6717165112495422,
"learning_rate": 0.000198722498225692,
"loss": 1.4569,
"step": 20
},
{
"epoch": 0.03,
"grad_norm": 0.6507506370544434,
"learning_rate": 0.0001986515259048971,
"loss": 1.4318,
"step": 21
},
{
"epoch": 0.03,
"grad_norm": 0.6129789352416992,
"learning_rate": 0.0001985805535841022,
"loss": 1.4275,
"step": 22
},
{
"epoch": 0.03,
"grad_norm": 0.606242835521698,
"learning_rate": 0.00019850958126330732,
"loss": 1.3986,
"step": 23
},
{
"epoch": 0.03,
"grad_norm": 0.656890869140625,
"learning_rate": 0.00019843860894251244,
"loss": 1.4376,
"step": 24
},
{
"epoch": 0.04,
"grad_norm": 0.6069300174713135,
"learning_rate": 0.00019836763662171752,
"loss": 1.4592,
"step": 25
},
{
"epoch": 0.04,
"grad_norm": 0.6284168362617493,
"learning_rate": 0.00019829666430092263,
"loss": 1.4375,
"step": 26
},
{
"epoch": 0.04,
"grad_norm": 0.6825727820396423,
"learning_rate": 0.00019822569198012777,
"loss": 1.4902,
"step": 27
},
{
"epoch": 0.04,
"grad_norm": 0.5566306114196777,
"learning_rate": 0.00019815471965933289,
"loss": 1.3259,
"step": 28
},
{
"epoch": 0.04,
"grad_norm": 0.5704931020736694,
"learning_rate": 0.00019808374733853797,
"loss": 1.4229,
"step": 29
},
{
"epoch": 0.04,
"grad_norm": 0.6081483364105225,
"learning_rate": 0.00019801277501774308,
"loss": 1.3713,
"step": 30
},
{
"epoch": 0.04,
"grad_norm": 0.6242185235023499,
"learning_rate": 0.0001979418026969482,
"loss": 1.3438,
"step": 31
},
{
"epoch": 0.05,
"grad_norm": 0.5827323198318481,
"learning_rate": 0.0001978708303761533,
"loss": 1.4987,
"step": 32
},
{
"epoch": 0.05,
"grad_norm": 0.5230266451835632,
"learning_rate": 0.00019779985805535842,
"loss": 1.3116,
"step": 33
},
{
"epoch": 0.05,
"grad_norm": 0.5829095244407654,
"learning_rate": 0.00019772888573456354,
"loss": 1.5099,
"step": 34
},
{
"epoch": 0.05,
"grad_norm": 0.61451655626297,
"learning_rate": 0.00019765791341376865,
"loss": 1.4576,
"step": 35
},
{
"epoch": 0.05,
"grad_norm": 0.5359353423118591,
"learning_rate": 0.00019758694109297376,
"loss": 1.4095,
"step": 36
},
{
"epoch": 0.05,
"grad_norm": 0.5368149876594543,
"learning_rate": 0.00019751596877217887,
"loss": 1.3962,
"step": 37
},
{
"epoch": 0.05,
"grad_norm": 0.5719636678695679,
"learning_rate": 0.00019744499645138396,
"loss": 1.3462,
"step": 38
},
{
"epoch": 0.06,
"grad_norm": 0.5419232845306396,
"learning_rate": 0.00019737402413058907,
"loss": 1.3416,
"step": 39
},
{
"epoch": 0.06,
"grad_norm": 0.530976414680481,
"learning_rate": 0.00019730305180979418,
"loss": 1.4029,
"step": 40
},
{
"epoch": 0.06,
"grad_norm": 0.5526696443557739,
"learning_rate": 0.0001972320794889993,
"loss": 1.3693,
"step": 41
},
{
"epoch": 0.06,
"grad_norm": 0.5667834281921387,
"learning_rate": 0.0001971611071682044,
"loss": 1.3882,
"step": 42
},
{
"epoch": 0.06,
"grad_norm": 0.5483084917068481,
"learning_rate": 0.00019709013484740952,
"loss": 1.4203,
"step": 43
},
{
"epoch": 0.06,
"grad_norm": 0.5592283606529236,
"learning_rate": 0.00019701916252661464,
"loss": 1.366,
"step": 44
},
{
"epoch": 0.06,
"grad_norm": 0.5499030351638794,
"learning_rate": 0.00019694819020581975,
"loss": 1.3449,
"step": 45
},
{
"epoch": 0.07,
"grad_norm": 0.6002295613288879,
"learning_rate": 0.00019687721788502483,
"loss": 1.3919,
"step": 46
},
{
"epoch": 0.07,
"grad_norm": 0.5805856585502625,
"learning_rate": 0.00019680624556422995,
"loss": 1.317,
"step": 47
},
{
"epoch": 0.07,
"grad_norm": 0.5594860911369324,
"learning_rate": 0.00019673527324343506,
"loss": 1.4043,
"step": 48
},
{
"epoch": 0.07,
"grad_norm": 0.5409876108169556,
"learning_rate": 0.0001966643009226402,
"loss": 1.411,
"step": 49
},
{
"epoch": 0.07,
"grad_norm": 0.5239064693450928,
"learning_rate": 0.00019659332860184528,
"loss": 1.4308,
"step": 50
},
{
"epoch": 0.07,
"grad_norm": 0.5057370662689209,
"learning_rate": 0.0001965223562810504,
"loss": 1.3265,
"step": 51
},
{
"epoch": 0.07,
"grad_norm": 0.5035980939865112,
"learning_rate": 0.0001964513839602555,
"loss": 1.3382,
"step": 52
},
{
"epoch": 0.08,
"grad_norm": 0.5412520170211792,
"learning_rate": 0.00019638041163946062,
"loss": 1.3436,
"step": 53
},
{
"epoch": 0.08,
"grad_norm": 0.5295203328132629,
"learning_rate": 0.0001963094393186657,
"loss": 1.4424,
"step": 54
},
{
"epoch": 0.08,
"grad_norm": 0.5216801166534424,
"learning_rate": 0.00019623846699787082,
"loss": 1.3575,
"step": 55
},
{
"epoch": 0.08,
"grad_norm": 0.5418757200241089,
"learning_rate": 0.00019616749467707596,
"loss": 1.2319,
"step": 56
},
{
"epoch": 0.08,
"grad_norm": 0.5106856822967529,
"learning_rate": 0.00019609652235628107,
"loss": 1.3407,
"step": 57
},
{
"epoch": 0.08,
"grad_norm": 0.5250461101531982,
"learning_rate": 0.00019602555003548616,
"loss": 1.3413,
"step": 58
},
{
"epoch": 0.08,
"grad_norm": 0.5034674406051636,
"learning_rate": 0.00019595457771469127,
"loss": 1.3602,
"step": 59
},
{
"epoch": 0.09,
"grad_norm": 0.5334136486053467,
"learning_rate": 0.00019588360539389639,
"loss": 1.3282,
"step": 60
},
{
"epoch": 0.09,
"grad_norm": 0.5571975708007812,
"learning_rate": 0.0001958126330731015,
"loss": 1.4221,
"step": 61
},
{
"epoch": 0.09,
"grad_norm": 0.48554927110671997,
"learning_rate": 0.0001957416607523066,
"loss": 1.3636,
"step": 62
},
{
"epoch": 0.09,
"grad_norm": 0.5140034556388855,
"learning_rate": 0.00019567068843151172,
"loss": 1.3278,
"step": 63
},
{
"epoch": 0.09,
"grad_norm": 0.5018851161003113,
"learning_rate": 0.00019559971611071684,
"loss": 1.3048,
"step": 64
},
{
"epoch": 0.09,
"grad_norm": 0.543761670589447,
"learning_rate": 0.00019552874378992195,
"loss": 1.3423,
"step": 65
},
{
"epoch": 0.09,
"grad_norm": 0.5353609919548035,
"learning_rate": 0.00019545777146912706,
"loss": 1.3029,
"step": 66
},
{
"epoch": 0.09,
"grad_norm": 0.5764626264572144,
"learning_rate": 0.00019538679914833215,
"loss": 1.409,
"step": 67
},
{
"epoch": 0.1,
"grad_norm": 0.4783156216144562,
"learning_rate": 0.00019531582682753726,
"loss": 1.3306,
"step": 68
},
{
"epoch": 0.1,
"grad_norm": 0.48478803038597107,
"learning_rate": 0.00019524485450674237,
"loss": 1.4423,
"step": 69
},
{
"epoch": 0.1,
"grad_norm": 0.5065445899963379,
"learning_rate": 0.00019517388218594749,
"loss": 1.2997,
"step": 70
},
{
"epoch": 0.1,
"grad_norm": 0.4838726222515106,
"learning_rate": 0.0001951029098651526,
"loss": 1.3767,
"step": 71
},
{
"epoch": 0.1,
"grad_norm": 0.6124043464660645,
"learning_rate": 0.0001950319375443577,
"loss": 1.305,
"step": 72
},
{
"epoch": 0.1,
"grad_norm": 0.5889860391616821,
"learning_rate": 0.00019496096522356282,
"loss": 1.3477,
"step": 73
},
{
"epoch": 0.1,
"grad_norm": 0.5476070046424866,
"learning_rate": 0.00019488999290276794,
"loss": 1.3736,
"step": 74
},
{
"epoch": 0.11,
"grad_norm": 0.4919136166572571,
"learning_rate": 0.00019481902058197302,
"loss": 1.3518,
"step": 75
},
{
"epoch": 0.11,
"grad_norm": 0.5444558262825012,
"learning_rate": 0.00019474804826117813,
"loss": 1.4115,
"step": 76
},
{
"epoch": 0.11,
"grad_norm": 0.51210618019104,
"learning_rate": 0.00019467707594038327,
"loss": 1.3419,
"step": 77
},
{
"epoch": 0.11,
"grad_norm": 0.5465154647827148,
"learning_rate": 0.0001946061036195884,
"loss": 1.3435,
"step": 78
},
{
"epoch": 0.11,
"grad_norm": 0.4859498143196106,
"learning_rate": 0.00019453513129879347,
"loss": 1.3392,
"step": 79
},
{
"epoch": 0.11,
"grad_norm": 0.48886799812316895,
"learning_rate": 0.00019446415897799859,
"loss": 1.3239,
"step": 80
},
{
"epoch": 0.11,
"grad_norm": 0.514495313167572,
"learning_rate": 0.0001943931866572037,
"loss": 1.3007,
"step": 81
},
{
"epoch": 0.12,
"grad_norm": 0.5247107148170471,
"learning_rate": 0.0001943222143364088,
"loss": 1.3192,
"step": 82
},
{
"epoch": 0.12,
"grad_norm": 0.5341958999633789,
"learning_rate": 0.00019425124201561392,
"loss": 1.3269,
"step": 83
},
{
"epoch": 0.12,
"grad_norm": 0.5308558940887451,
"learning_rate": 0.00019418026969481904,
"loss": 1.3739,
"step": 84
},
{
"epoch": 0.12,
"grad_norm": 0.5067780613899231,
"learning_rate": 0.00019410929737402415,
"loss": 1.2969,
"step": 85
},
{
"epoch": 0.12,
"grad_norm": 0.5011096596717834,
"learning_rate": 0.00019403832505322926,
"loss": 1.3132,
"step": 86
},
{
"epoch": 0.12,
"grad_norm": 0.49596866965293884,
"learning_rate": 0.00019396735273243435,
"loss": 1.3118,
"step": 87
},
{
"epoch": 0.12,
"grad_norm": 0.5615769624710083,
"learning_rate": 0.00019389638041163946,
"loss": 1.4271,
"step": 88
},
{
"epoch": 0.13,
"grad_norm": 0.5133160352706909,
"learning_rate": 0.00019382540809084457,
"loss": 1.4121,
"step": 89
},
{
"epoch": 0.13,
"grad_norm": 0.5177298784255981,
"learning_rate": 0.00019375443577004969,
"loss": 1.3674,
"step": 90
},
{
"epoch": 0.13,
"grad_norm": 0.5397670269012451,
"learning_rate": 0.0001936834634492548,
"loss": 1.3749,
"step": 91
},
{
"epoch": 0.13,
"grad_norm": 0.510032057762146,
"learning_rate": 0.0001936124911284599,
"loss": 1.4079,
"step": 92
},
{
"epoch": 0.13,
"grad_norm": 0.49133965373039246,
"learning_rate": 0.00019354151880766502,
"loss": 1.3387,
"step": 93
},
{
"epoch": 0.13,
"grad_norm": 0.5044322609901428,
"learning_rate": 0.00019347054648687014,
"loss": 1.3763,
"step": 94
},
{
"epoch": 0.13,
"grad_norm": 0.5398576259613037,
"learning_rate": 0.00019339957416607525,
"loss": 1.342,
"step": 95
},
{
"epoch": 0.14,
"grad_norm": 0.5276138782501221,
"learning_rate": 0.00019332860184528033,
"loss": 1.3398,
"step": 96
},
{
"epoch": 0.14,
"grad_norm": 0.49148228764533997,
"learning_rate": 0.00019325762952448545,
"loss": 1.3678,
"step": 97
},
{
"epoch": 0.14,
"grad_norm": 0.5225054025650024,
"learning_rate": 0.0001931866572036906,
"loss": 1.297,
"step": 98
},
{
"epoch": 0.14,
"grad_norm": 0.5107095241546631,
"learning_rate": 0.0001931156848828957,
"loss": 1.2915,
"step": 99
},
{
"epoch": 0.14,
"grad_norm": 0.5660765171051025,
"learning_rate": 0.00019304471256210079,
"loss": 1.3034,
"step": 100
},
{
"epoch": 0.14,
"grad_norm": 0.5086053013801575,
"learning_rate": 0.0001929737402413059,
"loss": 1.2538,
"step": 101
},
{
"epoch": 0.14,
"grad_norm": 0.5296218991279602,
"learning_rate": 0.000192902767920511,
"loss": 1.3653,
"step": 102
},
{
"epoch": 0.15,
"grad_norm": 0.505626380443573,
"learning_rate": 0.00019283179559971612,
"loss": 1.3468,
"step": 103
},
{
"epoch": 0.15,
"grad_norm": 0.48313605785369873,
"learning_rate": 0.0001927608232789212,
"loss": 1.3689,
"step": 104
},
{
"epoch": 0.15,
"grad_norm": 0.5230417847633362,
"learning_rate": 0.00019268985095812635,
"loss": 1.3385,
"step": 105
},
{
"epoch": 0.15,
"grad_norm": 0.548197865486145,
"learning_rate": 0.00019261887863733146,
"loss": 1.4077,
"step": 106
},
{
"epoch": 0.15,
"grad_norm": 0.49077072739601135,
"learning_rate": 0.00019254790631653657,
"loss": 1.3333,
"step": 107
},
{
"epoch": 0.15,
"grad_norm": 0.49506676197052,
"learning_rate": 0.00019247693399574166,
"loss": 1.3019,
"step": 108
},
{
"epoch": 0.15,
"grad_norm": 0.5120548009872437,
"learning_rate": 0.00019240596167494677,
"loss": 1.2955,
"step": 109
},
{
"epoch": 0.16,
"grad_norm": 0.5009211301803589,
"learning_rate": 0.00019233498935415189,
"loss": 1.33,
"step": 110
},
{
"epoch": 0.16,
"grad_norm": 0.4983401894569397,
"learning_rate": 0.000192264017033357,
"loss": 1.2762,
"step": 111
},
{
"epoch": 0.16,
"grad_norm": 0.5030891299247742,
"learning_rate": 0.0001921930447125621,
"loss": 1.2902,
"step": 112
},
{
"epoch": 0.16,
"grad_norm": 0.5501623749732971,
"learning_rate": 0.00019212207239176722,
"loss": 1.3797,
"step": 113
},
{
"epoch": 0.16,
"grad_norm": 0.5063967704772949,
"learning_rate": 0.00019205110007097234,
"loss": 1.2997,
"step": 114
},
{
"epoch": 0.16,
"grad_norm": 0.5631483793258667,
"learning_rate": 0.00019198012775017745,
"loss": 1.274,
"step": 115
},
{
"epoch": 0.16,
"grad_norm": 0.537670373916626,
"learning_rate": 0.00019190915542938254,
"loss": 1.3115,
"step": 116
},
{
"epoch": 0.17,
"grad_norm": 0.541203498840332,
"learning_rate": 0.00019183818310858765,
"loss": 1.3092,
"step": 117
},
{
"epoch": 0.17,
"grad_norm": 0.5539008975028992,
"learning_rate": 0.00019176721078779276,
"loss": 1.3223,
"step": 118
},
{
"epoch": 0.17,
"grad_norm": 0.5102487802505493,
"learning_rate": 0.0001916962384669979,
"loss": 1.3181,
"step": 119
},
{
"epoch": 0.17,
"grad_norm": 0.502543032169342,
"learning_rate": 0.00019162526614620299,
"loss": 1.4029,
"step": 120
},
{
"epoch": 0.17,
"grad_norm": 0.5096634030342102,
"learning_rate": 0.0001915542938254081,
"loss": 1.308,
"step": 121
},
{
"epoch": 0.17,
"grad_norm": 0.547661542892456,
"learning_rate": 0.0001914833215046132,
"loss": 1.3109,
"step": 122
},
{
"epoch": 0.17,
"grad_norm": 0.4794137179851532,
"learning_rate": 0.00019141234918381832,
"loss": 1.3134,
"step": 123
},
{
"epoch": 0.18,
"grad_norm": 0.542648434638977,
"learning_rate": 0.00019134137686302344,
"loss": 1.3346,
"step": 124
},
{
"epoch": 0.18,
"grad_norm": 0.5438342690467834,
"learning_rate": 0.00019127040454222852,
"loss": 1.2907,
"step": 125
},
{
"epoch": 0.18,
"grad_norm": 0.5569918155670166,
"learning_rate": 0.00019119943222143366,
"loss": 1.348,
"step": 126
},
{
"epoch": 0.18,
"grad_norm": 0.5183106064796448,
"learning_rate": 0.00019112845990063878,
"loss": 1.3846,
"step": 127
},
{
"epoch": 0.18,
"grad_norm": 0.5425500869750977,
"learning_rate": 0.0001910574875798439,
"loss": 1.2643,
"step": 128
},
{
"epoch": 0.18,
"grad_norm": 0.6269490718841553,
"learning_rate": 0.00019098651525904897,
"loss": 1.2857,
"step": 129
},
{
"epoch": 0.18,
"grad_norm": 0.5118828415870667,
"learning_rate": 0.00019091554293825409,
"loss": 1.3618,
"step": 130
},
{
"epoch": 0.19,
"grad_norm": 0.48022595047950745,
"learning_rate": 0.0001908445706174592,
"loss": 1.3721,
"step": 131
},
{
"epoch": 0.19,
"grad_norm": 0.5537993907928467,
"learning_rate": 0.0001907735982966643,
"loss": 1.2895,
"step": 132
},
{
"epoch": 0.19,
"grad_norm": 0.5127624869346619,
"learning_rate": 0.00019070262597586942,
"loss": 1.3227,
"step": 133
},
{
"epoch": 0.19,
"grad_norm": 0.49052900075912476,
"learning_rate": 0.00019063165365507454,
"loss": 1.2652,
"step": 134
},
{
"epoch": 0.19,
"grad_norm": 0.499605268239975,
"learning_rate": 0.00019056068133427965,
"loss": 1.2807,
"step": 135
},
{
"epoch": 0.19,
"grad_norm": 0.514790952205658,
"learning_rate": 0.00019048970901348476,
"loss": 1.2387,
"step": 136
},
{
"epoch": 0.19,
"grad_norm": 0.5162804126739502,
"learning_rate": 0.00019041873669268985,
"loss": 1.3021,
"step": 137
},
{
"epoch": 0.2,
"grad_norm": 0.5387127995491028,
"learning_rate": 0.00019034776437189496,
"loss": 1.3652,
"step": 138
},
{
"epoch": 0.2,
"grad_norm": 0.5182953476905823,
"learning_rate": 0.00019027679205110007,
"loss": 1.3704,
"step": 139
},
{
"epoch": 0.2,
"grad_norm": 0.5148733854293823,
"learning_rate": 0.00019020581973030519,
"loss": 1.2932,
"step": 140
},
{
"epoch": 0.2,
"grad_norm": 0.5453186631202698,
"learning_rate": 0.0001901348474095103,
"loss": 1.3298,
"step": 141
},
{
"epoch": 0.2,
"grad_norm": 0.49780547618865967,
"learning_rate": 0.0001900638750887154,
"loss": 1.3241,
"step": 142
},
{
"epoch": 0.2,
"grad_norm": 0.563477635383606,
"learning_rate": 0.00018999290276792052,
"loss": 1.3003,
"step": 143
},
{
"epoch": 0.2,
"grad_norm": 0.6826238036155701,
"learning_rate": 0.00018992193044712564,
"loss": 1.2782,
"step": 144
},
{
"epoch": 0.21,
"grad_norm": 0.5311530828475952,
"learning_rate": 0.00018985095812633072,
"loss": 1.2525,
"step": 145
},
{
"epoch": 0.21,
"grad_norm": 0.5208378434181213,
"learning_rate": 0.00018977998580553584,
"loss": 1.2451,
"step": 146
},
{
"epoch": 0.21,
"grad_norm": 0.5175229907035828,
"learning_rate": 0.00018970901348474095,
"loss": 1.2783,
"step": 147
},
{
"epoch": 0.21,
"grad_norm": 0.5157881379127502,
"learning_rate": 0.0001896380411639461,
"loss": 1.3475,
"step": 148
},
{
"epoch": 0.21,
"grad_norm": 0.510566234588623,
"learning_rate": 0.00018956706884315117,
"loss": 1.3182,
"step": 149
},
{
"epoch": 0.21,
"grad_norm": 0.520010232925415,
"learning_rate": 0.00018949609652235629,
"loss": 1.3495,
"step": 150
},
{
"epoch": 0.21,
"grad_norm": 0.5622514486312866,
"learning_rate": 0.0001894251242015614,
"loss": 1.3128,
"step": 151
},
{
"epoch": 0.22,
"grad_norm": 0.5312150120735168,
"learning_rate": 0.0001893541518807665,
"loss": 1.3203,
"step": 152
},
{
"epoch": 0.22,
"grad_norm": 0.5105769634246826,
"learning_rate": 0.00018928317955997162,
"loss": 1.3155,
"step": 153
},
{
"epoch": 0.22,
"grad_norm": 0.5248401165008545,
"learning_rate": 0.0001892122072391767,
"loss": 1.2616,
"step": 154
},
{
"epoch": 0.22,
"grad_norm": 0.5388246774673462,
"learning_rate": 0.00018914123491838185,
"loss": 1.2817,
"step": 155
},
{
"epoch": 0.22,
"grad_norm": 0.5068986415863037,
"learning_rate": 0.00018907026259758696,
"loss": 1.2676,
"step": 156
},
{
"epoch": 0.22,
"grad_norm": 0.5076289176940918,
"learning_rate": 0.00018899929027679208,
"loss": 1.3285,
"step": 157
},
{
"epoch": 0.22,
"grad_norm": 0.5198759436607361,
"learning_rate": 0.00018892831795599716,
"loss": 1.2746,
"step": 158
},
{
"epoch": 0.23,
"grad_norm": 0.5820220112800598,
"learning_rate": 0.00018885734563520227,
"loss": 1.3505,
"step": 159
},
{
"epoch": 0.23,
"grad_norm": 0.5358129143714905,
"learning_rate": 0.0001887863733144074,
"loss": 1.2784,
"step": 160
},
{
"epoch": 0.23,
"grad_norm": 0.5665339827537537,
"learning_rate": 0.0001887154009936125,
"loss": 1.3348,
"step": 161
},
{
"epoch": 0.23,
"grad_norm": 0.5267711877822876,
"learning_rate": 0.0001886444286728176,
"loss": 1.3123,
"step": 162
},
{
"epoch": 0.23,
"grad_norm": 0.5009903907775879,
"learning_rate": 0.00018857345635202272,
"loss": 1.274,
"step": 163
},
{
"epoch": 0.23,
"grad_norm": 0.6048489212989807,
"learning_rate": 0.00018850248403122784,
"loss": 1.3001,
"step": 164
},
{
"epoch": 0.23,
"grad_norm": 0.5325021147727966,
"learning_rate": 0.00018843151171043295,
"loss": 1.2895,
"step": 165
},
{
"epoch": 0.24,
"grad_norm": 0.6042301654815674,
"learning_rate": 0.00018836053938963804,
"loss": 1.315,
"step": 166
},
{
"epoch": 0.24,
"grad_norm": 0.567377507686615,
"learning_rate": 0.00018828956706884315,
"loss": 1.2663,
"step": 167
},
{
"epoch": 0.24,
"grad_norm": 0.5558852553367615,
"learning_rate": 0.00018821859474804826,
"loss": 1.3094,
"step": 168
},
{
"epoch": 0.24,
"grad_norm": 0.5398116707801819,
"learning_rate": 0.0001881476224272534,
"loss": 1.2863,
"step": 169
},
{
"epoch": 0.24,
"grad_norm": 0.5448858141899109,
"learning_rate": 0.0001880766501064585,
"loss": 1.2846,
"step": 170
},
{
"epoch": 0.24,
"grad_norm": 0.5054113268852234,
"learning_rate": 0.0001880056777856636,
"loss": 1.2498,
"step": 171
},
{
"epoch": 0.24,
"grad_norm": 0.5151749849319458,
"learning_rate": 0.0001879347054648687,
"loss": 1.2958,
"step": 172
},
{
"epoch": 0.25,
"grad_norm": 0.5271678566932678,
"learning_rate": 0.00018786373314407382,
"loss": 1.3349,
"step": 173
},
{
"epoch": 0.25,
"grad_norm": 0.5065703392028809,
"learning_rate": 0.0001877927608232789,
"loss": 1.2965,
"step": 174
},
{
"epoch": 0.25,
"grad_norm": 0.5131989121437073,
"learning_rate": 0.00018772178850248402,
"loss": 1.3322,
"step": 175
},
{
"epoch": 0.25,
"grad_norm": 0.5324966907501221,
"learning_rate": 0.00018765081618168916,
"loss": 1.2557,
"step": 176
},
{
"epoch": 0.25,
"grad_norm": 0.5682156085968018,
"learning_rate": 0.00018757984386089428,
"loss": 1.352,
"step": 177
},
{
"epoch": 0.25,
"grad_norm": 0.5812451243400574,
"learning_rate": 0.00018750887154009936,
"loss": 1.3167,
"step": 178
},
{
"epoch": 0.25,
"grad_norm": 0.5220502018928528,
"learning_rate": 0.00018743789921930447,
"loss": 1.3982,
"step": 179
},
{
"epoch": 0.26,
"grad_norm": 0.519579291343689,
"learning_rate": 0.0001873669268985096,
"loss": 1.2545,
"step": 180
},
{
"epoch": 0.26,
"grad_norm": 0.5515694618225098,
"learning_rate": 0.0001872959545777147,
"loss": 1.2739,
"step": 181
},
{
"epoch": 0.26,
"grad_norm": 0.494443416595459,
"learning_rate": 0.0001872249822569198,
"loss": 1.3223,
"step": 182
},
{
"epoch": 0.26,
"grad_norm": 0.5370269417762756,
"learning_rate": 0.00018715400993612493,
"loss": 1.2643,
"step": 183
},
{
"epoch": 0.26,
"grad_norm": 0.5089952945709229,
"learning_rate": 0.00018708303761533004,
"loss": 1.2543,
"step": 184
},
{
"epoch": 0.26,
"grad_norm": 0.5908558964729309,
"learning_rate": 0.00018701206529453515,
"loss": 1.3621,
"step": 185
},
{
"epoch": 0.26,
"grad_norm": 0.5236672163009644,
"learning_rate": 0.00018694109297374026,
"loss": 1.2589,
"step": 186
},
{
"epoch": 0.26,
"grad_norm": 0.5075586438179016,
"learning_rate": 0.00018687012065294535,
"loss": 1.2567,
"step": 187
},
{
"epoch": 0.27,
"grad_norm": 0.484017938375473,
"learning_rate": 0.00018679914833215046,
"loss": 1.253,
"step": 188
},
{
"epoch": 0.27,
"grad_norm": 0.49491867423057556,
"learning_rate": 0.00018672817601135557,
"loss": 1.2903,
"step": 189
},
{
"epoch": 0.27,
"grad_norm": 0.5130958557128906,
"learning_rate": 0.0001866572036905607,
"loss": 1.3576,
"step": 190
},
{
"epoch": 0.27,
"grad_norm": 0.5242413282394409,
"learning_rate": 0.0001865862313697658,
"loss": 1.259,
"step": 191
},
{
"epoch": 0.27,
"grad_norm": 0.5665105581283569,
"learning_rate": 0.0001865152590489709,
"loss": 1.2892,
"step": 192
},
{
"epoch": 0.27,
"grad_norm": 0.5331645011901855,
"learning_rate": 0.00018644428672817603,
"loss": 1.3102,
"step": 193
},
{
"epoch": 0.27,
"grad_norm": 0.4966602921485901,
"learning_rate": 0.00018637331440738114,
"loss": 1.2915,
"step": 194
},
{
"epoch": 0.28,
"grad_norm": 0.5544999837875366,
"learning_rate": 0.00018630234208658622,
"loss": 1.258,
"step": 195
},
{
"epoch": 0.28,
"grad_norm": 0.520144522190094,
"learning_rate": 0.00018623136976579134,
"loss": 1.3164,
"step": 196
},
{
"epoch": 0.28,
"grad_norm": 0.5174679160118103,
"learning_rate": 0.00018616039744499648,
"loss": 1.344,
"step": 197
},
{
"epoch": 0.28,
"grad_norm": 0.4990881383419037,
"learning_rate": 0.0001860894251242016,
"loss": 1.3103,
"step": 198
},
{
"epoch": 0.28,
"grad_norm": 0.5118770003318787,
"learning_rate": 0.00018601845280340667,
"loss": 1.2647,
"step": 199
},
{
"epoch": 0.28,
"grad_norm": 0.5196303725242615,
"learning_rate": 0.0001859474804826118,
"loss": 1.336,
"step": 200
},
{
"epoch": 0.28,
"grad_norm": 0.5092043280601501,
"learning_rate": 0.0001858765081618169,
"loss": 1.2411,
"step": 201
},
{
"epoch": 0.29,
"grad_norm": 0.5203234553337097,
"learning_rate": 0.000185805535841022,
"loss": 1.2752,
"step": 202
},
{
"epoch": 0.29,
"grad_norm": 0.5390545725822449,
"learning_rate": 0.0001857345635202271,
"loss": 1.2102,
"step": 203
},
{
"epoch": 0.29,
"grad_norm": 0.5045953392982483,
"learning_rate": 0.00018566359119943224,
"loss": 1.3237,
"step": 204
},
{
"epoch": 0.29,
"grad_norm": 0.5172394514083862,
"learning_rate": 0.00018559261887863735,
"loss": 1.2755,
"step": 205
},
{
"epoch": 0.29,
"grad_norm": 0.5241258144378662,
"learning_rate": 0.00018552164655784246,
"loss": 1.3713,
"step": 206
},
{
"epoch": 0.29,
"grad_norm": 0.541176438331604,
"learning_rate": 0.00018545067423704755,
"loss": 1.2301,
"step": 207
},
{
"epoch": 0.29,
"grad_norm": 0.500068187713623,
"learning_rate": 0.00018537970191625266,
"loss": 1.2682,
"step": 208
},
{
"epoch": 0.3,
"grad_norm": 0.5059691071510315,
"learning_rate": 0.00018530872959545777,
"loss": 1.2505,
"step": 209
},
{
"epoch": 0.3,
"grad_norm": 0.49532344937324524,
"learning_rate": 0.0001852377572746629,
"loss": 1.2387,
"step": 210
},
{
"epoch": 0.3,
"grad_norm": 0.6257210373878479,
"learning_rate": 0.000185166784953868,
"loss": 1.3048,
"step": 211
},
{
"epoch": 0.3,
"grad_norm": 0.5715426206588745,
"learning_rate": 0.0001850958126330731,
"loss": 1.3693,
"step": 212
},
{
"epoch": 0.3,
"grad_norm": 0.5583224296569824,
"learning_rate": 0.00018502484031227823,
"loss": 1.2703,
"step": 213
},
{
"epoch": 0.3,
"grad_norm": 0.6376559138298035,
"learning_rate": 0.00018495386799148334,
"loss": 1.3016,
"step": 214
},
{
"epoch": 0.3,
"grad_norm": 0.535058856010437,
"learning_rate": 0.00018488289567068845,
"loss": 1.3551,
"step": 215
},
{
"epoch": 0.31,
"grad_norm": 0.5314571857452393,
"learning_rate": 0.00018481192334989354,
"loss": 1.2762,
"step": 216
},
{
"epoch": 0.31,
"grad_norm": 0.5577436089515686,
"learning_rate": 0.00018474095102909865,
"loss": 1.2742,
"step": 217
},
{
"epoch": 0.31,
"grad_norm": 0.5784611701965332,
"learning_rate": 0.0001846699787083038,
"loss": 1.3804,
"step": 218
},
{
"epoch": 0.31,
"grad_norm": 0.5190976858139038,
"learning_rate": 0.00018459900638750887,
"loss": 1.2978,
"step": 219
},
{
"epoch": 0.31,
"grad_norm": 0.5705554485321045,
"learning_rate": 0.000184528034066714,
"loss": 1.3462,
"step": 220
},
{
"epoch": 0.31,
"grad_norm": 0.5286474227905273,
"learning_rate": 0.0001844570617459191,
"loss": 1.3275,
"step": 221
},
{
"epoch": 0.31,
"grad_norm": 0.5056504607200623,
"learning_rate": 0.0001843860894251242,
"loss": 1.2601,
"step": 222
},
{
"epoch": 0.32,
"grad_norm": 0.5417407751083374,
"learning_rate": 0.00018431511710432933,
"loss": 1.2604,
"step": 223
},
{
"epoch": 0.32,
"grad_norm": 0.5213840007781982,
"learning_rate": 0.0001842441447835344,
"loss": 1.2505,
"step": 224
},
{
"epoch": 0.32,
"grad_norm": 0.5501871109008789,
"learning_rate": 0.00018417317246273952,
"loss": 1.2911,
"step": 225
},
{
"epoch": 0.32,
"grad_norm": 0.561215341091156,
"learning_rate": 0.00018410220014194466,
"loss": 1.2829,
"step": 226
},
{
"epoch": 0.32,
"grad_norm": 0.5201948285102844,
"learning_rate": 0.00018403122782114978,
"loss": 1.2713,
"step": 227
},
{
"epoch": 0.32,
"grad_norm": 0.5538742542266846,
"learning_rate": 0.00018396025550035486,
"loss": 1.2533,
"step": 228
},
{
"epoch": 0.32,
"grad_norm": 0.5249223113059998,
"learning_rate": 0.00018388928317955997,
"loss": 1.3244,
"step": 229
},
{
"epoch": 0.33,
"grad_norm": 0.5145981311798096,
"learning_rate": 0.0001838183108587651,
"loss": 1.2735,
"step": 230
},
{
"epoch": 0.33,
"grad_norm": 0.5346860885620117,
"learning_rate": 0.0001837473385379702,
"loss": 1.2238,
"step": 231
},
{
"epoch": 0.33,
"grad_norm": 0.5297681093215942,
"learning_rate": 0.0001836763662171753,
"loss": 1.3163,
"step": 232
},
{
"epoch": 0.33,
"grad_norm": 0.5075122117996216,
"learning_rate": 0.00018360539389638043,
"loss": 1.3422,
"step": 233
},
{
"epoch": 0.33,
"grad_norm": 0.5438109636306763,
"learning_rate": 0.00018353442157558554,
"loss": 1.3367,
"step": 234
},
{
"epoch": 0.33,
"grad_norm": 0.5246129035949707,
"learning_rate": 0.00018346344925479065,
"loss": 1.2883,
"step": 235
},
{
"epoch": 0.33,
"grad_norm": 0.5447835326194763,
"learning_rate": 0.00018339247693399574,
"loss": 1.2409,
"step": 236
},
{
"epoch": 0.34,
"grad_norm": 0.5204280018806458,
"learning_rate": 0.00018332150461320085,
"loss": 1.26,
"step": 237
},
{
"epoch": 0.34,
"grad_norm": 0.5784415602684021,
"learning_rate": 0.00018325053229240596,
"loss": 1.2953,
"step": 238
},
{
"epoch": 0.34,
"grad_norm": 0.5427743196487427,
"learning_rate": 0.00018317955997161108,
"loss": 1.273,
"step": 239
},
{
"epoch": 0.34,
"grad_norm": 0.6766988635063171,
"learning_rate": 0.0001831085876508162,
"loss": 1.2985,
"step": 240
},
{
"epoch": 0.34,
"grad_norm": 0.5197088122367859,
"learning_rate": 0.0001830376153300213,
"loss": 1.331,
"step": 241
},
{
"epoch": 0.34,
"grad_norm": 0.5441769957542419,
"learning_rate": 0.0001829666430092264,
"loss": 1.2786,
"step": 242
},
{
"epoch": 0.34,
"grad_norm": 0.5796704888343811,
"learning_rate": 0.00018289567068843153,
"loss": 1.3317,
"step": 243
},
{
"epoch": 0.35,
"grad_norm": 0.5018938183784485,
"learning_rate": 0.00018282469836763664,
"loss": 1.1423,
"step": 244
},
{
"epoch": 0.35,
"grad_norm": 0.5162649154663086,
"learning_rate": 0.00018275372604684172,
"loss": 1.2544,
"step": 245
},
{
"epoch": 0.35,
"grad_norm": 0.5415024161338806,
"learning_rate": 0.00018268275372604684,
"loss": 1.2748,
"step": 246
},
{
"epoch": 0.35,
"grad_norm": 0.5413600206375122,
"learning_rate": 0.00018261178140525198,
"loss": 1.3206,
"step": 247
},
{
"epoch": 0.35,
"grad_norm": 0.5510334968566895,
"learning_rate": 0.0001825408090844571,
"loss": 1.2939,
"step": 248
},
{
"epoch": 0.35,
"grad_norm": 0.5214188694953918,
"learning_rate": 0.00018246983676366218,
"loss": 1.3322,
"step": 249
},
{
"epoch": 0.35,
"grad_norm": 0.5412008166313171,
"learning_rate": 0.0001823988644428673,
"loss": 1.2781,
"step": 250
},
{
"epoch": 0.36,
"grad_norm": 0.565697431564331,
"learning_rate": 0.0001823278921220724,
"loss": 1.3404,
"step": 251
},
{
"epoch": 0.36,
"grad_norm": 0.5353158712387085,
"learning_rate": 0.0001822569198012775,
"loss": 1.3575,
"step": 252
},
{
"epoch": 0.36,
"grad_norm": 0.5572609305381775,
"learning_rate": 0.0001821859474804826,
"loss": 1.2713,
"step": 253
},
{
"epoch": 0.36,
"grad_norm": 0.5632525682449341,
"learning_rate": 0.00018211497515968774,
"loss": 1.2556,
"step": 254
},
{
"epoch": 0.36,
"grad_norm": 0.5715951323509216,
"learning_rate": 0.00018204400283889285,
"loss": 1.3299,
"step": 255
},
{
"epoch": 0.36,
"grad_norm": 0.5702312588691711,
"learning_rate": 0.00018197303051809796,
"loss": 1.1876,
"step": 256
},
{
"epoch": 0.36,
"grad_norm": 0.5902386903762817,
"learning_rate": 0.00018190205819730305,
"loss": 1.3058,
"step": 257
},
{
"epoch": 0.37,
"grad_norm": 0.5512535572052002,
"learning_rate": 0.00018183108587650816,
"loss": 1.2739,
"step": 258
},
{
"epoch": 0.37,
"grad_norm": 0.5071843266487122,
"learning_rate": 0.00018176011355571328,
"loss": 1.2882,
"step": 259
},
{
"epoch": 0.37,
"grad_norm": 0.5268561840057373,
"learning_rate": 0.0001816891412349184,
"loss": 1.2187,
"step": 260
},
{
"epoch": 0.37,
"grad_norm": 0.5496917963027954,
"learning_rate": 0.0001816181689141235,
"loss": 1.3007,
"step": 261
},
{
"epoch": 0.37,
"grad_norm": 0.5308434367179871,
"learning_rate": 0.0001815471965933286,
"loss": 1.2778,
"step": 262
},
{
"epoch": 0.37,
"grad_norm": 0.5531402230262756,
"learning_rate": 0.00018147622427253373,
"loss": 1.2411,
"step": 263
},
{
"epoch": 0.37,
"grad_norm": 0.5323889255523682,
"learning_rate": 0.00018140525195173884,
"loss": 1.2655,
"step": 264
},
{
"epoch": 0.38,
"grad_norm": 0.50742506980896,
"learning_rate": 0.00018133427963094392,
"loss": 1.2429,
"step": 265
},
{
"epoch": 0.38,
"grad_norm": 0.5636353492736816,
"learning_rate": 0.00018126330731014904,
"loss": 1.2079,
"step": 266
},
{
"epoch": 0.38,
"grad_norm": 0.5328524708747864,
"learning_rate": 0.00018119233498935415,
"loss": 1.2601,
"step": 267
},
{
"epoch": 0.38,
"grad_norm": 0.5404384136199951,
"learning_rate": 0.0001811213626685593,
"loss": 1.2461,
"step": 268
},
{
"epoch": 0.38,
"grad_norm": 0.5366867184638977,
"learning_rate": 0.00018105039034776438,
"loss": 1.2189,
"step": 269
},
{
"epoch": 0.38,
"grad_norm": 0.548911988735199,
"learning_rate": 0.0001809794180269695,
"loss": 1.3085,
"step": 270
},
{
"epoch": 0.38,
"grad_norm": 0.48217761516571045,
"learning_rate": 0.0001809084457061746,
"loss": 1.1768,
"step": 271
},
{
"epoch": 0.39,
"grad_norm": 0.5497826933860779,
"learning_rate": 0.00018083747338537971,
"loss": 1.2676,
"step": 272
},
{
"epoch": 0.39,
"grad_norm": 0.5193854570388794,
"learning_rate": 0.00018076650106458483,
"loss": 1.2323,
"step": 273
},
{
"epoch": 0.39,
"grad_norm": 0.5157699584960938,
"learning_rate": 0.0001806955287437899,
"loss": 1.2549,
"step": 274
},
{
"epoch": 0.39,
"grad_norm": 0.5180855989456177,
"learning_rate": 0.00018062455642299505,
"loss": 1.2495,
"step": 275
},
{
"epoch": 0.39,
"grad_norm": 0.5382695198059082,
"learning_rate": 0.00018055358410220016,
"loss": 1.2594,
"step": 276
},
{
"epoch": 0.39,
"grad_norm": 0.525709331035614,
"learning_rate": 0.00018048261178140528,
"loss": 1.3069,
"step": 277
},
{
"epoch": 0.39,
"grad_norm": 0.5481734275817871,
"learning_rate": 0.00018041163946061036,
"loss": 1.2754,
"step": 278
},
{
"epoch": 0.4,
"grad_norm": 0.5247664451599121,
"learning_rate": 0.00018034066713981548,
"loss": 1.2433,
"step": 279
},
{
"epoch": 0.4,
"grad_norm": 0.5272576808929443,
"learning_rate": 0.0001802696948190206,
"loss": 1.3306,
"step": 280
},
{
"epoch": 0.4,
"grad_norm": 0.5923863649368286,
"learning_rate": 0.0001801987224982257,
"loss": 1.2251,
"step": 281
},
{
"epoch": 0.4,
"grad_norm": 0.5423222780227661,
"learning_rate": 0.00018012775017743081,
"loss": 1.3083,
"step": 282
},
{
"epoch": 0.4,
"grad_norm": 0.5334257483482361,
"learning_rate": 0.00018005677785663593,
"loss": 1.2794,
"step": 283
},
{
"epoch": 0.4,
"grad_norm": 0.5238288044929504,
"learning_rate": 0.00017998580553584104,
"loss": 1.2115,
"step": 284
},
{
"epoch": 0.4,
"grad_norm": 0.5420286059379578,
"learning_rate": 0.00017991483321504615,
"loss": 1.1612,
"step": 285
},
{
"epoch": 0.41,
"grad_norm": 0.511910080909729,
"learning_rate": 0.00017984386089425124,
"loss": 1.2526,
"step": 286
},
{
"epoch": 0.41,
"grad_norm": 0.5181018710136414,
"learning_rate": 0.00017977288857345635,
"loss": 1.3193,
"step": 287
},
{
"epoch": 0.41,
"grad_norm": 0.5733616352081299,
"learning_rate": 0.00017970191625266146,
"loss": 1.3032,
"step": 288
},
{
"epoch": 0.41,
"grad_norm": 0.5532722473144531,
"learning_rate": 0.0001796309439318666,
"loss": 1.2011,
"step": 289
},
{
"epoch": 0.41,
"grad_norm": 0.5308284163475037,
"learning_rate": 0.0001795599716110717,
"loss": 1.3084,
"step": 290
},
{
"epoch": 0.41,
"grad_norm": 0.5570117831230164,
"learning_rate": 0.0001794889992902768,
"loss": 1.2663,
"step": 291
},
{
"epoch": 0.41,
"grad_norm": 0.5518406629562378,
"learning_rate": 0.00017941802696948191,
"loss": 1.2782,
"step": 292
},
{
"epoch": 0.42,
"grad_norm": 0.5204089283943176,
"learning_rate": 0.00017934705464868703,
"loss": 1.2532,
"step": 293
},
{
"epoch": 0.42,
"grad_norm": 0.5458951592445374,
"learning_rate": 0.0001792760823278921,
"loss": 1.2507,
"step": 294
},
{
"epoch": 0.42,
"grad_norm": 0.6208042502403259,
"learning_rate": 0.00017920511000709723,
"loss": 1.2569,
"step": 295
},
{
"epoch": 0.42,
"grad_norm": 0.5473780632019043,
"learning_rate": 0.00017913413768630236,
"loss": 1.3277,
"step": 296
},
{
"epoch": 0.42,
"grad_norm": 0.5519291162490845,
"learning_rate": 0.00017906316536550748,
"loss": 1.2588,
"step": 297
},
{
"epoch": 0.42,
"grad_norm": 0.5681371092796326,
"learning_rate": 0.00017899219304471256,
"loss": 1.2454,
"step": 298
},
{
"epoch": 0.42,
"grad_norm": 0.5682107210159302,
"learning_rate": 0.00017892122072391768,
"loss": 1.2342,
"step": 299
},
{
"epoch": 0.43,
"grad_norm": 0.5326645374298096,
"learning_rate": 0.0001788502484031228,
"loss": 1.2052,
"step": 300
},
{
"epoch": 0.43,
"grad_norm": 0.6371567249298096,
"learning_rate": 0.0001787792760823279,
"loss": 1.1617,
"step": 301
},
{
"epoch": 0.43,
"grad_norm": 0.6094531416893005,
"learning_rate": 0.00017870830376153301,
"loss": 1.218,
"step": 302
},
{
"epoch": 0.43,
"grad_norm": 0.5426862835884094,
"learning_rate": 0.00017863733144073813,
"loss": 1.313,
"step": 303
},
{
"epoch": 0.43,
"grad_norm": 0.5334256291389465,
"learning_rate": 0.00017856635911994324,
"loss": 1.2978,
"step": 304
},
{
"epoch": 0.43,
"grad_norm": 0.5406014919281006,
"learning_rate": 0.00017849538679914835,
"loss": 1.3094,
"step": 305
},
{
"epoch": 0.43,
"grad_norm": 0.5107502341270447,
"learning_rate": 0.00017842441447835347,
"loss": 1.1634,
"step": 306
},
{
"epoch": 0.43,
"grad_norm": 0.5104424357414246,
"learning_rate": 0.00017835344215755855,
"loss": 1.2576,
"step": 307
},
{
"epoch": 0.44,
"grad_norm": 0.5558850169181824,
"learning_rate": 0.00017828246983676366,
"loss": 1.2696,
"step": 308
},
{
"epoch": 0.44,
"grad_norm": 0.5559511184692383,
"learning_rate": 0.00017821149751596878,
"loss": 1.292,
"step": 309
},
{
"epoch": 0.44,
"grad_norm": 0.5495506525039673,
"learning_rate": 0.0001781405251951739,
"loss": 1.2363,
"step": 310
},
{
"epoch": 0.44,
"grad_norm": 0.5591257214546204,
"learning_rate": 0.000178069552874379,
"loss": 1.2425,
"step": 311
},
{
"epoch": 0.44,
"grad_norm": 0.5381730198860168,
"learning_rate": 0.00017799858055358411,
"loss": 1.1903,
"step": 312
},
{
"epoch": 0.44,
"grad_norm": 0.5501212477684021,
"learning_rate": 0.00017792760823278923,
"loss": 1.2598,
"step": 313
},
{
"epoch": 0.44,
"grad_norm": 0.6151654124259949,
"learning_rate": 0.00017785663591199434,
"loss": 1.1703,
"step": 314
},
{
"epoch": 0.45,
"grad_norm": 0.5302771925926208,
"learning_rate": 0.00017778566359119943,
"loss": 1.268,
"step": 315
},
{
"epoch": 0.45,
"grad_norm": 0.5328003764152527,
"learning_rate": 0.00017771469127040454,
"loss": 1.259,
"step": 316
},
{
"epoch": 0.45,
"grad_norm": 0.5223655700683594,
"learning_rate": 0.00017764371894960965,
"loss": 1.2631,
"step": 317
},
{
"epoch": 0.45,
"grad_norm": 0.5435971021652222,
"learning_rate": 0.0001775727466288148,
"loss": 1.2645,
"step": 318
},
{
"epoch": 0.45,
"grad_norm": 0.5286442041397095,
"learning_rate": 0.00017750177430801988,
"loss": 1.2613,
"step": 319
},
{
"epoch": 0.45,
"grad_norm": 0.5257332921028137,
"learning_rate": 0.000177430801987225,
"loss": 1.2435,
"step": 320
},
{
"epoch": 0.45,
"grad_norm": 0.5707381963729858,
"learning_rate": 0.0001773598296664301,
"loss": 1.2085,
"step": 321
},
{
"epoch": 0.46,
"grad_norm": 0.5447467565536499,
"learning_rate": 0.00017728885734563521,
"loss": 1.2266,
"step": 322
},
{
"epoch": 0.46,
"grad_norm": 0.5707226991653442,
"learning_rate": 0.0001772178850248403,
"loss": 1.2284,
"step": 323
},
{
"epoch": 0.46,
"grad_norm": 0.5743198394775391,
"learning_rate": 0.0001771469127040454,
"loss": 1.1793,
"step": 324
},
{
"epoch": 0.46,
"grad_norm": 0.5705985426902771,
"learning_rate": 0.00017707594038325055,
"loss": 1.2348,
"step": 325
},
{
"epoch": 0.46,
"grad_norm": 0.5277898907661438,
"learning_rate": 0.00017700496806245567,
"loss": 1.1603,
"step": 326
},
{
"epoch": 0.46,
"grad_norm": 0.5345696806907654,
"learning_rate": 0.00017693399574166075,
"loss": 1.2799,
"step": 327
},
{
"epoch": 0.46,
"grad_norm": 0.5616069436073303,
"learning_rate": 0.00017686302342086586,
"loss": 1.2574,
"step": 328
},
{
"epoch": 0.47,
"grad_norm": 0.5110874772071838,
"learning_rate": 0.00017679205110007098,
"loss": 1.2796,
"step": 329
},
{
"epoch": 0.47,
"grad_norm": 0.5312853455543518,
"learning_rate": 0.0001767210787792761,
"loss": 1.294,
"step": 330
},
{
"epoch": 0.47,
"grad_norm": 0.576275110244751,
"learning_rate": 0.0001766501064584812,
"loss": 1.2761,
"step": 331
},
{
"epoch": 0.47,
"grad_norm": 0.5550281405448914,
"learning_rate": 0.00017657913413768631,
"loss": 1.2489,
"step": 332
},
{
"epoch": 0.47,
"grad_norm": 0.55446457862854,
"learning_rate": 0.00017650816181689143,
"loss": 1.3195,
"step": 333
},
{
"epoch": 0.47,
"grad_norm": 0.5434035658836365,
"learning_rate": 0.00017643718949609654,
"loss": 1.2927,
"step": 334
},
{
"epoch": 0.47,
"grad_norm": 0.536832869052887,
"learning_rate": 0.00017636621717530165,
"loss": 1.2448,
"step": 335
},
{
"epoch": 0.48,
"grad_norm": 0.5537269711494446,
"learning_rate": 0.00017629524485450674,
"loss": 1.2461,
"step": 336
},
{
"epoch": 0.48,
"grad_norm": 0.5346850156784058,
"learning_rate": 0.00017622427253371185,
"loss": 1.2614,
"step": 337
},
{
"epoch": 0.48,
"grad_norm": 0.5255287289619446,
"learning_rate": 0.00017615330021291696,
"loss": 1.2376,
"step": 338
},
{
"epoch": 0.48,
"grad_norm": 0.5487991571426392,
"learning_rate": 0.00017608232789212208,
"loss": 1.2523,
"step": 339
},
{
"epoch": 0.48,
"grad_norm": 0.554786205291748,
"learning_rate": 0.0001760113555713272,
"loss": 1.2265,
"step": 340
},
{
"epoch": 0.48,
"grad_norm": 0.8868914246559143,
"learning_rate": 0.0001759403832505323,
"loss": 1.2401,
"step": 341
},
{
"epoch": 0.48,
"grad_norm": 0.5412053465843201,
"learning_rate": 0.00017586941092973741,
"loss": 1.2027,
"step": 342
},
{
"epoch": 0.49,
"grad_norm": 0.5590038895606995,
"learning_rate": 0.00017579843860894253,
"loss": 1.1883,
"step": 343
},
{
"epoch": 0.49,
"grad_norm": 0.5496480464935303,
"learning_rate": 0.0001757274662881476,
"loss": 1.2329,
"step": 344
},
{
"epoch": 0.49,
"grad_norm": 0.5843716263771057,
"learning_rate": 0.00017565649396735273,
"loss": 1.249,
"step": 345
},
{
"epoch": 0.49,
"grad_norm": 0.6055741906166077,
"learning_rate": 0.00017558552164655787,
"loss": 1.1477,
"step": 346
},
{
"epoch": 0.49,
"grad_norm": 0.5583242177963257,
"learning_rate": 0.00017551454932576298,
"loss": 1.2862,
"step": 347
},
{
"epoch": 0.49,
"grad_norm": 2.0163357257843018,
"learning_rate": 0.00017544357700496806,
"loss": 1.2014,
"step": 348
},
{
"epoch": 0.49,
"grad_norm": 0.6089881658554077,
"learning_rate": 0.00017537260468417318,
"loss": 1.3267,
"step": 349
},
{
"epoch": 0.5,
"grad_norm": 0.5367897748947144,
"learning_rate": 0.0001753016323633783,
"loss": 1.2379,
"step": 350
},
{
"epoch": 0.5,
"grad_norm": 0.5732371807098389,
"learning_rate": 0.0001752306600425834,
"loss": 1.2557,
"step": 351
},
{
"epoch": 0.5,
"grad_norm": 0.6220402717590332,
"learning_rate": 0.0001751596877217885,
"loss": 1.2415,
"step": 352
},
{
"epoch": 0.5,
"grad_norm": 0.5300910472869873,
"learning_rate": 0.00017508871540099363,
"loss": 1.2322,
"step": 353
},
{
"epoch": 0.5,
"grad_norm": 0.5924064517021179,
"learning_rate": 0.00017501774308019874,
"loss": 1.2216,
"step": 354
},
{
"epoch": 0.5,
"grad_norm": 0.5625554323196411,
"learning_rate": 0.00017494677075940385,
"loss": 1.1887,
"step": 355
},
{
"epoch": 0.5,
"grad_norm": 0.5225299000740051,
"learning_rate": 0.00017487579843860894,
"loss": 1.2274,
"step": 356
},
{
"epoch": 0.51,
"grad_norm": 0.5613044500350952,
"learning_rate": 0.00017480482611781405,
"loss": 1.2268,
"step": 357
},
{
"epoch": 0.51,
"grad_norm": 0.5904155373573303,
"learning_rate": 0.00017473385379701916,
"loss": 1.266,
"step": 358
},
{
"epoch": 0.51,
"grad_norm": 0.5112659335136414,
"learning_rate": 0.00017466288147622428,
"loss": 1.2891,
"step": 359
},
{
"epoch": 0.51,
"grad_norm": 0.5344319343566895,
"learning_rate": 0.0001745919091554294,
"loss": 1.2561,
"step": 360
},
{
"epoch": 0.51,
"grad_norm": 0.5556378364562988,
"learning_rate": 0.0001745209368346345,
"loss": 1.263,
"step": 361
},
{
"epoch": 0.51,
"grad_norm": 0.5675813555717468,
"learning_rate": 0.00017444996451383962,
"loss": 1.3235,
"step": 362
},
{
"epoch": 0.51,
"grad_norm": 0.5817593932151794,
"learning_rate": 0.00017437899219304473,
"loss": 1.3471,
"step": 363
},
{
"epoch": 0.52,
"grad_norm": 0.5670614242553711,
"learning_rate": 0.00017430801987224984,
"loss": 1.3019,
"step": 364
},
{
"epoch": 0.52,
"grad_norm": 0.5784518122673035,
"learning_rate": 0.00017423704755145493,
"loss": 1.2122,
"step": 365
},
{
"epoch": 0.52,
"grad_norm": 0.5680185556411743,
"learning_rate": 0.00017416607523066004,
"loss": 1.2042,
"step": 366
},
{
"epoch": 0.52,
"grad_norm": 0.5474495887756348,
"learning_rate": 0.00017409510290986518,
"loss": 1.1273,
"step": 367
},
{
"epoch": 0.52,
"grad_norm": 0.5586380362510681,
"learning_rate": 0.00017402413058907026,
"loss": 1.2425,
"step": 368
},
{
"epoch": 0.52,
"grad_norm": 0.5608516335487366,
"learning_rate": 0.00017395315826827538,
"loss": 1.228,
"step": 369
},
{
"epoch": 0.52,
"grad_norm": 0.5339083075523376,
"learning_rate": 0.0001738821859474805,
"loss": 1.2171,
"step": 370
},
{
"epoch": 0.53,
"grad_norm": 0.5438979268074036,
"learning_rate": 0.0001738112136266856,
"loss": 1.2474,
"step": 371
},
{
"epoch": 0.53,
"grad_norm": 0.534762978553772,
"learning_rate": 0.00017374024130589072,
"loss": 1.2844,
"step": 372
},
{
"epoch": 0.53,
"grad_norm": 0.5463000535964966,
"learning_rate": 0.0001736692689850958,
"loss": 1.2389,
"step": 373
},
{
"epoch": 0.53,
"grad_norm": 0.5256685018539429,
"learning_rate": 0.00017359829666430094,
"loss": 1.2425,
"step": 374
},
{
"epoch": 0.53,
"grad_norm": 0.509695827960968,
"learning_rate": 0.00017352732434350605,
"loss": 1.2685,
"step": 375
},
{
"epoch": 0.53,
"grad_norm": 0.532911479473114,
"learning_rate": 0.00017345635202271117,
"loss": 1.237,
"step": 376
},
{
"epoch": 0.53,
"grad_norm": 0.5342946648597717,
"learning_rate": 0.00017338537970191625,
"loss": 1.2334,
"step": 377
},
{
"epoch": 0.54,
"grad_norm": 0.5660805702209473,
"learning_rate": 0.00017331440738112136,
"loss": 1.1716,
"step": 378
},
{
"epoch": 0.54,
"grad_norm": 0.5654976963996887,
"learning_rate": 0.00017324343506032648,
"loss": 1.2343,
"step": 379
},
{
"epoch": 0.54,
"grad_norm": 0.5845637917518616,
"learning_rate": 0.0001731724627395316,
"loss": 1.2037,
"step": 380
},
{
"epoch": 0.54,
"grad_norm": 0.570573091506958,
"learning_rate": 0.0001731014904187367,
"loss": 1.2255,
"step": 381
},
{
"epoch": 0.54,
"grad_norm": 0.567817747592926,
"learning_rate": 0.00017303051809794182,
"loss": 1.2238,
"step": 382
},
{
"epoch": 0.54,
"grad_norm": 0.5502271056175232,
"learning_rate": 0.00017295954577714693,
"loss": 1.2237,
"step": 383
},
{
"epoch": 0.54,
"grad_norm": 0.5540646314620972,
"learning_rate": 0.00017288857345635204,
"loss": 1.2192,
"step": 384
},
{
"epoch": 0.55,
"grad_norm": 0.5388753414154053,
"learning_rate": 0.00017281760113555713,
"loss": 1.2181,
"step": 385
},
{
"epoch": 0.55,
"grad_norm": 0.5435704588890076,
"learning_rate": 0.00017274662881476224,
"loss": 1.1588,
"step": 386
},
{
"epoch": 0.55,
"grad_norm": 0.5815427899360657,
"learning_rate": 0.00017267565649396735,
"loss": 1.1784,
"step": 387
},
{
"epoch": 0.55,
"grad_norm": 0.5804842710494995,
"learning_rate": 0.0001726046841731725,
"loss": 1.274,
"step": 388
},
{
"epoch": 0.55,
"grad_norm": 0.5361278057098389,
"learning_rate": 0.00017253371185237758,
"loss": 1.1567,
"step": 389
},
{
"epoch": 0.55,
"grad_norm": 0.5804944038391113,
"learning_rate": 0.0001724627395315827,
"loss": 1.2138,
"step": 390
},
{
"epoch": 0.55,
"grad_norm": 0.5311663150787354,
"learning_rate": 0.0001723917672107878,
"loss": 1.2318,
"step": 391
},
{
"epoch": 0.56,
"grad_norm": 0.5714538097381592,
"learning_rate": 0.00017232079488999292,
"loss": 1.1928,
"step": 392
},
{
"epoch": 0.56,
"grad_norm": 0.5973841547966003,
"learning_rate": 0.00017224982256919803,
"loss": 1.208,
"step": 393
},
{
"epoch": 0.56,
"grad_norm": 0.644917368888855,
"learning_rate": 0.00017217885024840311,
"loss": 1.2642,
"step": 394
},
{
"epoch": 0.56,
"grad_norm": 0.586104691028595,
"learning_rate": 0.00017210787792760825,
"loss": 1.2574,
"step": 395
},
{
"epoch": 0.56,
"grad_norm": 0.583574652671814,
"learning_rate": 0.00017203690560681337,
"loss": 1.327,
"step": 396
},
{
"epoch": 0.56,
"grad_norm": 0.5617191195487976,
"learning_rate": 0.00017196593328601848,
"loss": 1.1605,
"step": 397
},
{
"epoch": 0.56,
"grad_norm": 0.5635659098625183,
"learning_rate": 0.00017189496096522356,
"loss": 1.2222,
"step": 398
},
{
"epoch": 0.57,
"grad_norm": 0.6253629326820374,
"learning_rate": 0.00017182398864442868,
"loss": 1.2162,
"step": 399
},
{
"epoch": 0.57,
"grad_norm": 0.6384497880935669,
"learning_rate": 0.0001717530163236338,
"loss": 1.2878,
"step": 400
},
{
"epoch": 0.57,
"grad_norm": 0.5805338621139526,
"learning_rate": 0.0001716820440028389,
"loss": 1.2664,
"step": 401
},
{
"epoch": 0.57,
"grad_norm": 0.5705095529556274,
"learning_rate": 0.000171611071682044,
"loss": 1.2205,
"step": 402
},
{
"epoch": 0.57,
"grad_norm": 0.5843649506568909,
"learning_rate": 0.00017154009936124913,
"loss": 1.2003,
"step": 403
},
{
"epoch": 0.57,
"grad_norm": 0.5798406600952148,
"learning_rate": 0.00017146912704045424,
"loss": 1.228,
"step": 404
},
{
"epoch": 0.57,
"grad_norm": 0.60892254114151,
"learning_rate": 0.00017139815471965935,
"loss": 1.1853,
"step": 405
},
{
"epoch": 0.58,
"grad_norm": 0.592847466468811,
"learning_rate": 0.00017132718239886444,
"loss": 1.2062,
"step": 406
},
{
"epoch": 0.58,
"grad_norm": 0.5821055769920349,
"learning_rate": 0.00017125621007806955,
"loss": 1.1844,
"step": 407
},
{
"epoch": 0.58,
"grad_norm": 0.5680034160614014,
"learning_rate": 0.00017118523775727466,
"loss": 1.235,
"step": 408
},
{
"epoch": 0.58,
"grad_norm": 0.5684024095535278,
"learning_rate": 0.00017111426543647978,
"loss": 1.209,
"step": 409
},
{
"epoch": 0.58,
"grad_norm": 0.5451253056526184,
"learning_rate": 0.0001710432931156849,
"loss": 1.2026,
"step": 410
},
{
"epoch": 0.58,
"grad_norm": 0.5240196585655212,
"learning_rate": 0.00017097232079489,
"loss": 1.1733,
"step": 411
},
{
"epoch": 0.58,
"grad_norm": 0.5445153713226318,
"learning_rate": 0.00017090134847409512,
"loss": 1.2643,
"step": 412
},
{
"epoch": 0.59,
"grad_norm": 0.5214688181877136,
"learning_rate": 0.00017083037615330023,
"loss": 1.2381,
"step": 413
},
{
"epoch": 0.59,
"grad_norm": 0.5414560437202454,
"learning_rate": 0.00017075940383250531,
"loss": 1.2705,
"step": 414
},
{
"epoch": 0.59,
"grad_norm": 0.567355215549469,
"learning_rate": 0.00017068843151171043,
"loss": 1.2956,
"step": 415
},
{
"epoch": 0.59,
"grad_norm": 0.5429472327232361,
"learning_rate": 0.00017061745919091554,
"loss": 1.2057,
"step": 416
},
{
"epoch": 0.59,
"grad_norm": 0.5171405673027039,
"learning_rate": 0.00017054648687012068,
"loss": 1.1803,
"step": 417
},
{
"epoch": 0.59,
"grad_norm": 0.5672470331192017,
"learning_rate": 0.00017047551454932577,
"loss": 1.2566,
"step": 418
},
{
"epoch": 0.59,
"grad_norm": 0.5415303707122803,
"learning_rate": 0.00017040454222853088,
"loss": 1.2364,
"step": 419
},
{
"epoch": 0.6,
"grad_norm": 0.546066164970398,
"learning_rate": 0.000170333569907736,
"loss": 1.2458,
"step": 420
},
{
"epoch": 0.6,
"grad_norm": 0.532200813293457,
"learning_rate": 0.0001702625975869411,
"loss": 1.227,
"step": 421
},
{
"epoch": 0.6,
"grad_norm": 0.5418232679367065,
"learning_rate": 0.00017019162526614622,
"loss": 1.1896,
"step": 422
},
{
"epoch": 0.6,
"grad_norm": 0.5557692646980286,
"learning_rate": 0.0001701206529453513,
"loss": 1.213,
"step": 423
},
{
"epoch": 0.6,
"grad_norm": 0.5730442404747009,
"learning_rate": 0.00017004968062455644,
"loss": 1.1846,
"step": 424
},
{
"epoch": 0.6,
"grad_norm": 0.5887081027030945,
"learning_rate": 0.00016997870830376155,
"loss": 1.2557,
"step": 425
},
{
"epoch": 0.6,
"grad_norm": 0.5441513061523438,
"learning_rate": 0.00016990773598296667,
"loss": 1.2167,
"step": 426
},
{
"epoch": 0.6,
"grad_norm": 0.5610032081604004,
"learning_rate": 0.00016983676366217175,
"loss": 1.2331,
"step": 427
},
{
"epoch": 0.61,
"grad_norm": 0.5748931765556335,
"learning_rate": 0.00016976579134137687,
"loss": 1.1176,
"step": 428
},
{
"epoch": 0.61,
"grad_norm": 0.5597652196884155,
"learning_rate": 0.00016969481902058198,
"loss": 1.2147,
"step": 429
},
{
"epoch": 0.61,
"grad_norm": 0.5750680565834045,
"learning_rate": 0.0001696238466997871,
"loss": 1.2943,
"step": 430
},
{
"epoch": 0.61,
"grad_norm": 0.5501390695571899,
"learning_rate": 0.0001695528743789922,
"loss": 1.242,
"step": 431
},
{
"epoch": 0.61,
"grad_norm": 0.5521880984306335,
"learning_rate": 0.00016948190205819732,
"loss": 1.1144,
"step": 432
},
{
"epoch": 0.61,
"grad_norm": 0.563677191734314,
"learning_rate": 0.00016941092973740243,
"loss": 1.2875,
"step": 433
},
{
"epoch": 0.61,
"grad_norm": 0.5883916616439819,
"learning_rate": 0.00016933995741660754,
"loss": 1.2823,
"step": 434
},
{
"epoch": 0.62,
"grad_norm": 0.5612311363220215,
"learning_rate": 0.00016926898509581263,
"loss": 1.1956,
"step": 435
},
{
"epoch": 0.62,
"grad_norm": 0.5482412576675415,
"learning_rate": 0.00016919801277501774,
"loss": 1.2513,
"step": 436
},
{
"epoch": 0.62,
"grad_norm": 0.519913911819458,
"learning_rate": 0.00016912704045422285,
"loss": 1.2337,
"step": 437
},
{
"epoch": 0.62,
"grad_norm": 0.5395796895027161,
"learning_rate": 0.000169056068133428,
"loss": 1.2148,
"step": 438
},
{
"epoch": 0.62,
"grad_norm": 0.5586941242218018,
"learning_rate": 0.00016898509581263308,
"loss": 1.1486,
"step": 439
},
{
"epoch": 0.62,
"grad_norm": 0.5568752884864807,
"learning_rate": 0.0001689141234918382,
"loss": 1.2541,
"step": 440
},
{
"epoch": 0.62,
"grad_norm": 0.5618448853492737,
"learning_rate": 0.0001688431511710433,
"loss": 1.2332,
"step": 441
},
{
"epoch": 0.63,
"grad_norm": 0.5413042902946472,
"learning_rate": 0.00016877217885024842,
"loss": 1.2234,
"step": 442
},
{
"epoch": 0.63,
"grad_norm": 0.5402277112007141,
"learning_rate": 0.0001687012065294535,
"loss": 1.2421,
"step": 443
},
{
"epoch": 0.63,
"grad_norm": 0.5068058967590332,
"learning_rate": 0.00016863023420865861,
"loss": 1.2887,
"step": 444
},
{
"epoch": 0.63,
"grad_norm": 0.5408315658569336,
"learning_rate": 0.00016855926188786375,
"loss": 1.2649,
"step": 445
},
{
"epoch": 0.63,
"grad_norm": 0.5410880446434021,
"learning_rate": 0.00016848828956706887,
"loss": 1.2779,
"step": 446
},
{
"epoch": 0.63,
"grad_norm": 0.5637301206588745,
"learning_rate": 0.00016841731724627395,
"loss": 1.1927,
"step": 447
},
{
"epoch": 0.63,
"grad_norm": 0.5596689581871033,
"learning_rate": 0.00016834634492547907,
"loss": 1.246,
"step": 448
},
{
"epoch": 0.64,
"grad_norm": 0.5656019449234009,
"learning_rate": 0.00016827537260468418,
"loss": 1.2089,
"step": 449
},
{
"epoch": 0.64,
"grad_norm": 0.5710034966468811,
"learning_rate": 0.0001682044002838893,
"loss": 1.2327,
"step": 450
},
{
"epoch": 0.64,
"grad_norm": 0.5633525848388672,
"learning_rate": 0.0001681334279630944,
"loss": 1.1619,
"step": 451
},
{
"epoch": 0.64,
"grad_norm": 0.5352773070335388,
"learning_rate": 0.00016806245564229952,
"loss": 1.2318,
"step": 452
},
{
"epoch": 0.64,
"grad_norm": 0.5729439854621887,
"learning_rate": 0.00016799148332150463,
"loss": 1.208,
"step": 453
},
{
"epoch": 0.64,
"grad_norm": 0.5703145861625671,
"learning_rate": 0.00016792051100070974,
"loss": 1.2752,
"step": 454
},
{
"epoch": 0.64,
"grad_norm": 0.5441925525665283,
"learning_rate": 0.00016784953867991485,
"loss": 1.2427,
"step": 455
},
{
"epoch": 0.65,
"grad_norm": 0.5500312447547913,
"learning_rate": 0.00016777856635911994,
"loss": 1.1833,
"step": 456
},
{
"epoch": 0.65,
"grad_norm": 0.538560688495636,
"learning_rate": 0.00016770759403832505,
"loss": 1.1809,
"step": 457
},
{
"epoch": 0.65,
"grad_norm": 0.5843518972396851,
"learning_rate": 0.00016763662171753017,
"loss": 1.2804,
"step": 458
},
{
"epoch": 0.65,
"grad_norm": 0.5413994789123535,
"learning_rate": 0.00016756564939673528,
"loss": 1.2109,
"step": 459
},
{
"epoch": 0.65,
"grad_norm": 0.5343271493911743,
"learning_rate": 0.0001674946770759404,
"loss": 1.2236,
"step": 460
},
{
"epoch": 0.65,
"grad_norm": 0.5626118183135986,
"learning_rate": 0.0001674237047551455,
"loss": 1.1744,
"step": 461
},
{
"epoch": 0.65,
"grad_norm": 0.5811516642570496,
"learning_rate": 0.00016735273243435062,
"loss": 1.2039,
"step": 462
},
{
"epoch": 0.66,
"grad_norm": 0.5644727945327759,
"learning_rate": 0.00016728176011355573,
"loss": 1.2103,
"step": 463
},
{
"epoch": 0.66,
"grad_norm": 0.560869574546814,
"learning_rate": 0.00016721078779276081,
"loss": 1.1694,
"step": 464
},
{
"epoch": 0.66,
"grad_norm": 0.5319089293479919,
"learning_rate": 0.00016713981547196593,
"loss": 1.1809,
"step": 465
},
{
"epoch": 0.66,
"grad_norm": 0.5337138175964355,
"learning_rate": 0.00016706884315117107,
"loss": 1.1381,
"step": 466
},
{
"epoch": 0.66,
"grad_norm": 0.565428614616394,
"learning_rate": 0.00016699787083037618,
"loss": 1.2766,
"step": 467
},
{
"epoch": 0.66,
"grad_norm": 0.5666337013244629,
"learning_rate": 0.00016692689850958127,
"loss": 1.2236,
"step": 468
},
{
"epoch": 0.66,
"grad_norm": 0.5685385465621948,
"learning_rate": 0.00016685592618878638,
"loss": 1.1651,
"step": 469
},
{
"epoch": 0.67,
"grad_norm": 0.5501173734664917,
"learning_rate": 0.0001667849538679915,
"loss": 1.2433,
"step": 470
},
{
"epoch": 0.67,
"grad_norm": 0.5514901280403137,
"learning_rate": 0.0001667139815471966,
"loss": 1.1912,
"step": 471
},
{
"epoch": 0.67,
"grad_norm": 0.5558573603630066,
"learning_rate": 0.0001666430092264017,
"loss": 1.2777,
"step": 472
},
{
"epoch": 0.67,
"grad_norm": 0.5633502006530762,
"learning_rate": 0.00016657203690560683,
"loss": 1.2223,
"step": 473
},
{
"epoch": 0.67,
"grad_norm": 0.5611795783042908,
"learning_rate": 0.00016650106458481194,
"loss": 1.156,
"step": 474
},
{
"epoch": 0.67,
"grad_norm": 0.5518985986709595,
"learning_rate": 0.00016643009226401705,
"loss": 1.2571,
"step": 475
},
{
"epoch": 0.67,
"grad_norm": 0.5667358040809631,
"learning_rate": 0.00016635911994322214,
"loss": 1.2574,
"step": 476
},
{
"epoch": 0.68,
"grad_norm": 0.5549746751785278,
"learning_rate": 0.00016628814762242725,
"loss": 1.2252,
"step": 477
},
{
"epoch": 0.68,
"grad_norm": 0.5677898526191711,
"learning_rate": 0.00016621717530163237,
"loss": 1.2288,
"step": 478
},
{
"epoch": 0.68,
"grad_norm": 0.5517006516456604,
"learning_rate": 0.00016614620298083748,
"loss": 1.2153,
"step": 479
},
{
"epoch": 0.68,
"grad_norm": 0.5561969876289368,
"learning_rate": 0.0001660752306600426,
"loss": 1.2308,
"step": 480
},
{
"epoch": 0.68,
"grad_norm": 0.5672180652618408,
"learning_rate": 0.0001660042583392477,
"loss": 1.2362,
"step": 481
},
{
"epoch": 0.68,
"grad_norm": 0.5887801647186279,
"learning_rate": 0.00016593328601845282,
"loss": 1.2541,
"step": 482
},
{
"epoch": 0.68,
"grad_norm": 0.5410957336425781,
"learning_rate": 0.00016586231369765793,
"loss": 1.198,
"step": 483
},
{
"epoch": 0.69,
"grad_norm": 0.5757806897163391,
"learning_rate": 0.00016579134137686304,
"loss": 1.2226,
"step": 484
},
{
"epoch": 0.69,
"grad_norm": 0.5371631979942322,
"learning_rate": 0.00016572036905606813,
"loss": 1.1952,
"step": 485
},
{
"epoch": 0.69,
"grad_norm": 0.528356671333313,
"learning_rate": 0.00016564939673527324,
"loss": 1.2062,
"step": 486
},
{
"epoch": 0.69,
"grad_norm": 0.5555338859558105,
"learning_rate": 0.00016557842441447835,
"loss": 1.283,
"step": 487
},
{
"epoch": 0.69,
"grad_norm": 0.5706049799919128,
"learning_rate": 0.00016550745209368347,
"loss": 1.1803,
"step": 488
},
{
"epoch": 0.69,
"grad_norm": 0.5852779150009155,
"learning_rate": 0.00016543647977288858,
"loss": 1.2062,
"step": 489
},
{
"epoch": 0.69,
"grad_norm": 0.5755358934402466,
"learning_rate": 0.0001653655074520937,
"loss": 1.3143,
"step": 490
},
{
"epoch": 0.7,
"grad_norm": 0.5640034079551697,
"learning_rate": 0.0001652945351312988,
"loss": 1.1585,
"step": 491
},
{
"epoch": 0.7,
"grad_norm": 0.576317310333252,
"learning_rate": 0.00016522356281050392,
"loss": 1.332,
"step": 492
},
{
"epoch": 0.7,
"grad_norm": 0.5770896077156067,
"learning_rate": 0.000165152590489709,
"loss": 1.2555,
"step": 493
},
{
"epoch": 0.7,
"grad_norm": 0.576921284198761,
"learning_rate": 0.00016508161816891412,
"loss": 1.3109,
"step": 494
},
{
"epoch": 0.7,
"grad_norm": 0.5501707792282104,
"learning_rate": 0.00016501064584811926,
"loss": 1.2497,
"step": 495
},
{
"epoch": 0.7,
"grad_norm": 0.5693079829216003,
"learning_rate": 0.00016493967352732437,
"loss": 1.2341,
"step": 496
},
{
"epoch": 0.7,
"grad_norm": 0.5721957087516785,
"learning_rate": 0.00016486870120652945,
"loss": 1.1374,
"step": 497
},
{
"epoch": 0.71,
"grad_norm": 0.5555086135864258,
"learning_rate": 0.00016479772888573457,
"loss": 1.1855,
"step": 498
},
{
"epoch": 0.71,
"grad_norm": 0.5749430060386658,
"learning_rate": 0.00016472675656493968,
"loss": 1.1561,
"step": 499
},
{
"epoch": 0.71,
"grad_norm": 0.5816613435745239,
"learning_rate": 0.0001646557842441448,
"loss": 1.2336,
"step": 500
},
{
"epoch": 0.71,
"grad_norm": 0.5536159873008728,
"learning_rate": 0.00016458481192334988,
"loss": 1.2006,
"step": 501
},
{
"epoch": 0.71,
"grad_norm": 0.5615043044090271,
"learning_rate": 0.00016451383960255502,
"loss": 1.2464,
"step": 502
},
{
"epoch": 0.71,
"grad_norm": 0.5301942825317383,
"learning_rate": 0.00016444286728176013,
"loss": 1.0993,
"step": 503
},
{
"epoch": 0.71,
"grad_norm": 0.5370047092437744,
"learning_rate": 0.00016437189496096524,
"loss": 1.2876,
"step": 504
},
{
"epoch": 0.72,
"grad_norm": 0.549637496471405,
"learning_rate": 0.00016430092264017033,
"loss": 1.2099,
"step": 505
},
{
"epoch": 0.72,
"grad_norm": 0.5674926042556763,
"learning_rate": 0.00016422995031937544,
"loss": 1.2473,
"step": 506
},
{
"epoch": 0.72,
"grad_norm": 0.5429442524909973,
"learning_rate": 0.00016415897799858055,
"loss": 1.1994,
"step": 507
},
{
"epoch": 0.72,
"grad_norm": 0.5792283415794373,
"learning_rate": 0.00016408800567778567,
"loss": 1.1834,
"step": 508
},
{
"epoch": 0.72,
"grad_norm": 0.5722516179084778,
"learning_rate": 0.00016401703335699078,
"loss": 1.1922,
"step": 509
},
{
"epoch": 0.72,
"grad_norm": 0.6014196276664734,
"learning_rate": 0.0001639460610361959,
"loss": 1.1813,
"step": 510
},
{
"epoch": 0.72,
"grad_norm": 0.5406440496444702,
"learning_rate": 0.000163875088715401,
"loss": 1.1248,
"step": 511
},
{
"epoch": 0.73,
"grad_norm": 0.5606802701950073,
"learning_rate": 0.00016380411639460612,
"loss": 1.2143,
"step": 512
},
{
"epoch": 0.73,
"grad_norm": 0.5731990933418274,
"learning_rate": 0.00016373314407381123,
"loss": 1.2202,
"step": 513
},
{
"epoch": 0.73,
"grad_norm": 0.5451279282569885,
"learning_rate": 0.00016366217175301632,
"loss": 1.2338,
"step": 514
},
{
"epoch": 0.73,
"grad_norm": 0.5566918849945068,
"learning_rate": 0.00016359119943222143,
"loss": 1.2574,
"step": 515
},
{
"epoch": 0.73,
"grad_norm": 0.5759538412094116,
"learning_rate": 0.00016352022711142657,
"loss": 1.2868,
"step": 516
},
{
"epoch": 0.73,
"grad_norm": 0.5528054237365723,
"learning_rate": 0.00016344925479063165,
"loss": 1.1986,
"step": 517
},
{
"epoch": 0.73,
"grad_norm": 0.5640310645103455,
"learning_rate": 0.00016337828246983677,
"loss": 1.2458,
"step": 518
},
{
"epoch": 0.74,
"grad_norm": 0.5853886604309082,
"learning_rate": 0.00016330731014904188,
"loss": 1.1772,
"step": 519
},
{
"epoch": 0.74,
"grad_norm": 0.5519319176673889,
"learning_rate": 0.000163236337828247,
"loss": 1.1771,
"step": 520
},
{
"epoch": 0.74,
"grad_norm": 0.571728527545929,
"learning_rate": 0.0001631653655074521,
"loss": 1.1939,
"step": 521
},
{
"epoch": 0.74,
"grad_norm": 0.5816935896873474,
"learning_rate": 0.0001630943931866572,
"loss": 1.1395,
"step": 522
},
{
"epoch": 0.74,
"grad_norm": 0.5645069479942322,
"learning_rate": 0.00016302342086586233,
"loss": 1.2277,
"step": 523
},
{
"epoch": 0.74,
"grad_norm": 0.5603705048561096,
"learning_rate": 0.00016295244854506744,
"loss": 1.2194,
"step": 524
},
{
"epoch": 0.74,
"grad_norm": 0.5555822253227234,
"learning_rate": 0.00016288147622427256,
"loss": 1.166,
"step": 525
},
{
"epoch": 0.75,
"grad_norm": 0.5700538158416748,
"learning_rate": 0.00016281050390347764,
"loss": 1.2747,
"step": 526
},
{
"epoch": 0.75,
"grad_norm": 0.5572327375411987,
"learning_rate": 0.00016273953158268275,
"loss": 1.1742,
"step": 527
},
{
"epoch": 0.75,
"grad_norm": 0.5874422192573547,
"learning_rate": 0.00016266855926188787,
"loss": 1.2637,
"step": 528
},
{
"epoch": 0.75,
"grad_norm": 0.61215740442276,
"learning_rate": 0.00016259758694109298,
"loss": 1.2082,
"step": 529
},
{
"epoch": 0.75,
"grad_norm": 0.5912395119667053,
"learning_rate": 0.0001625266146202981,
"loss": 1.2397,
"step": 530
},
{
"epoch": 0.75,
"grad_norm": 0.5844393968582153,
"learning_rate": 0.0001624556422995032,
"loss": 1.2337,
"step": 531
},
{
"epoch": 0.75,
"grad_norm": 0.5393280982971191,
"learning_rate": 0.00016238466997870832,
"loss": 1.1741,
"step": 532
},
{
"epoch": 0.76,
"grad_norm": 0.5622760653495789,
"learning_rate": 0.00016231369765791343,
"loss": 1.1932,
"step": 533
},
{
"epoch": 0.76,
"grad_norm": 0.5254821181297302,
"learning_rate": 0.00016224272533711852,
"loss": 1.1992,
"step": 534
},
{
"epoch": 0.76,
"grad_norm": 0.5416676998138428,
"learning_rate": 0.00016217175301632363,
"loss": 1.2038,
"step": 535
},
{
"epoch": 0.76,
"grad_norm": 0.5292723178863525,
"learning_rate": 0.00016210078069552874,
"loss": 1.2346,
"step": 536
},
{
"epoch": 0.76,
"grad_norm": 0.5630815029144287,
"learning_rate": 0.00016202980837473388,
"loss": 1.1923,
"step": 537
},
{
"epoch": 0.76,
"grad_norm": 0.5653424263000488,
"learning_rate": 0.00016195883605393897,
"loss": 1.2701,
"step": 538
},
{
"epoch": 0.76,
"grad_norm": 0.5717771053314209,
"learning_rate": 0.00016188786373314408,
"loss": 1.2556,
"step": 539
},
{
"epoch": 0.77,
"grad_norm": 0.5919432640075684,
"learning_rate": 0.0001618168914123492,
"loss": 1.2069,
"step": 540
},
{
"epoch": 0.77,
"grad_norm": 0.5614644885063171,
"learning_rate": 0.0001617459190915543,
"loss": 1.24,
"step": 541
},
{
"epoch": 0.77,
"grad_norm": 0.5731059908866882,
"learning_rate": 0.00016167494677075942,
"loss": 1.1578,
"step": 542
},
{
"epoch": 0.77,
"grad_norm": 0.5611764192581177,
"learning_rate": 0.0001616039744499645,
"loss": 1.1595,
"step": 543
},
{
"epoch": 0.77,
"grad_norm": 0.5955880880355835,
"learning_rate": 0.00016153300212916964,
"loss": 1.1857,
"step": 544
},
{
"epoch": 0.77,
"grad_norm": 0.5684686303138733,
"learning_rate": 0.00016146202980837476,
"loss": 1.1648,
"step": 545
},
{
"epoch": 0.77,
"grad_norm": 0.5427114367485046,
"learning_rate": 0.00016139105748757987,
"loss": 1.2056,
"step": 546
},
{
"epoch": 0.77,
"grad_norm": 0.5569632053375244,
"learning_rate": 0.00016132008516678495,
"loss": 1.1554,
"step": 547
},
{
"epoch": 0.78,
"grad_norm": 0.5450757145881653,
"learning_rate": 0.00016124911284599007,
"loss": 1.1602,
"step": 548
},
{
"epoch": 0.78,
"grad_norm": 0.5493456125259399,
"learning_rate": 0.00016117814052519518,
"loss": 1.1994,
"step": 549
},
{
"epoch": 0.78,
"grad_norm": 0.5915331244468689,
"learning_rate": 0.0001611071682044003,
"loss": 1.2192,
"step": 550
},
{
"epoch": 0.78,
"grad_norm": 0.5696746110916138,
"learning_rate": 0.0001610361958836054,
"loss": 1.1858,
"step": 551
},
{
"epoch": 0.78,
"grad_norm": 0.5555996894836426,
"learning_rate": 0.00016096522356281052,
"loss": 1.2077,
"step": 552
},
{
"epoch": 0.78,
"grad_norm": 0.5695299506187439,
"learning_rate": 0.00016089425124201563,
"loss": 1.2805,
"step": 553
},
{
"epoch": 0.78,
"grad_norm": 0.5820121169090271,
"learning_rate": 0.00016082327892122074,
"loss": 1.1841,
"step": 554
},
{
"epoch": 0.79,
"grad_norm": 0.5951303243637085,
"learning_rate": 0.00016075230660042583,
"loss": 1.2728,
"step": 555
},
{
"epoch": 0.79,
"grad_norm": 0.5539563298225403,
"learning_rate": 0.00016068133427963094,
"loss": 1.2861,
"step": 556
},
{
"epoch": 0.79,
"grad_norm": 0.5624521970748901,
"learning_rate": 0.00016061036195883605,
"loss": 1.1379,
"step": 557
},
{
"epoch": 0.79,
"grad_norm": 0.5960082411766052,
"learning_rate": 0.0001605393896380412,
"loss": 1.2268,
"step": 558
},
{
"epoch": 0.79,
"grad_norm": 0.5910595059394836,
"learning_rate": 0.00016046841731724628,
"loss": 1.1165,
"step": 559
},
{
"epoch": 0.79,
"grad_norm": 0.6063516139984131,
"learning_rate": 0.0001603974449964514,
"loss": 1.2154,
"step": 560
},
{
"epoch": 0.79,
"grad_norm": 0.5970266461372375,
"learning_rate": 0.0001603264726756565,
"loss": 1.2419,
"step": 561
},
{
"epoch": 0.8,
"grad_norm": 0.5765990018844604,
"learning_rate": 0.00016025550035486162,
"loss": 1.1821,
"step": 562
},
{
"epoch": 0.8,
"grad_norm": 0.556180477142334,
"learning_rate": 0.0001601845280340667,
"loss": 1.206,
"step": 563
},
{
"epoch": 0.8,
"grad_norm": 0.5440099835395813,
"learning_rate": 0.00016011355571327182,
"loss": 1.2757,
"step": 564
},
{
"epoch": 0.8,
"grad_norm": 0.5394881963729858,
"learning_rate": 0.00016004258339247696,
"loss": 1.1814,
"step": 565
},
{
"epoch": 0.8,
"grad_norm": 0.5824431777000427,
"learning_rate": 0.00015997161107168207,
"loss": 1.1821,
"step": 566
},
{
"epoch": 0.8,
"grad_norm": 0.6307337880134583,
"learning_rate": 0.00015990063875088715,
"loss": 1.2679,
"step": 567
},
{
"epoch": 0.8,
"grad_norm": 0.5689281225204468,
"learning_rate": 0.00015982966643009227,
"loss": 1.2582,
"step": 568
},
{
"epoch": 0.81,
"grad_norm": 0.5547791123390198,
"learning_rate": 0.00015975869410929738,
"loss": 1.2367,
"step": 569
},
{
"epoch": 0.81,
"grad_norm": 0.6274536848068237,
"learning_rate": 0.0001596877217885025,
"loss": 1.1979,
"step": 570
},
{
"epoch": 0.81,
"grad_norm": 0.5949268341064453,
"learning_rate": 0.0001596167494677076,
"loss": 1.2517,
"step": 571
},
{
"epoch": 0.81,
"grad_norm": 0.5719754099845886,
"learning_rate": 0.00015954577714691272,
"loss": 1.2743,
"step": 572
},
{
"epoch": 0.81,
"grad_norm": 0.5814628601074219,
"learning_rate": 0.00015947480482611783,
"loss": 1.1369,
"step": 573
},
{
"epoch": 0.81,
"grad_norm": 0.5738956928253174,
"learning_rate": 0.00015940383250532294,
"loss": 1.1537,
"step": 574
},
{
"epoch": 0.81,
"grad_norm": 0.6162692904472351,
"learning_rate": 0.00015933286018452806,
"loss": 1.139,
"step": 575
},
{
"epoch": 0.82,
"grad_norm": 0.5667981505393982,
"learning_rate": 0.00015926188786373314,
"loss": 1.2606,
"step": 576
},
{
"epoch": 0.82,
"grad_norm": 0.6261587142944336,
"learning_rate": 0.00015919091554293825,
"loss": 1.2971,
"step": 577
},
{
"epoch": 0.82,
"grad_norm": 0.5734363794326782,
"learning_rate": 0.00015911994322214337,
"loss": 1.248,
"step": 578
},
{
"epoch": 0.82,
"grad_norm": 0.5452428460121155,
"learning_rate": 0.00015904897090134848,
"loss": 1.1698,
"step": 579
},
{
"epoch": 0.82,
"grad_norm": 0.5635668635368347,
"learning_rate": 0.0001589779985805536,
"loss": 1.2266,
"step": 580
},
{
"epoch": 0.82,
"grad_norm": 0.5433382987976074,
"learning_rate": 0.0001589070262597587,
"loss": 1.1104,
"step": 581
},
{
"epoch": 0.82,
"grad_norm": 0.5507457256317139,
"learning_rate": 0.00015883605393896382,
"loss": 1.2375,
"step": 582
},
{
"epoch": 0.83,
"grad_norm": 0.5394136309623718,
"learning_rate": 0.00015876508161816893,
"loss": 1.1949,
"step": 583
},
{
"epoch": 0.83,
"grad_norm": 0.5387130975723267,
"learning_rate": 0.00015869410929737402,
"loss": 1.1563,
"step": 584
},
{
"epoch": 0.83,
"grad_norm": 0.5782540440559387,
"learning_rate": 0.00015862313697657913,
"loss": 1.2099,
"step": 585
},
{
"epoch": 0.83,
"grad_norm": 0.5795274972915649,
"learning_rate": 0.00015855216465578424,
"loss": 1.1736,
"step": 586
},
{
"epoch": 0.83,
"grad_norm": 0.5905880331993103,
"learning_rate": 0.00015848119233498938,
"loss": 1.1577,
"step": 587
},
{
"epoch": 0.83,
"grad_norm": 0.6062709093093872,
"learning_rate": 0.00015841022001419447,
"loss": 1.1655,
"step": 588
},
{
"epoch": 0.83,
"grad_norm": 0.6285403370857239,
"learning_rate": 0.00015833924769339958,
"loss": 1.1556,
"step": 589
},
{
"epoch": 0.84,
"grad_norm": 0.5971753001213074,
"learning_rate": 0.0001582682753726047,
"loss": 1.2227,
"step": 590
},
{
"epoch": 0.84,
"grad_norm": 0.5791364312171936,
"learning_rate": 0.0001581973030518098,
"loss": 1.2093,
"step": 591
},
{
"epoch": 0.84,
"grad_norm": 0.5568805932998657,
"learning_rate": 0.0001581263307310149,
"loss": 1.2346,
"step": 592
},
{
"epoch": 0.84,
"grad_norm": 0.5391885042190552,
"learning_rate": 0.00015805535841022,
"loss": 1.1697,
"step": 593
},
{
"epoch": 0.84,
"grad_norm": 0.567952036857605,
"learning_rate": 0.00015798438608942514,
"loss": 1.2303,
"step": 594
},
{
"epoch": 0.84,
"grad_norm": 0.5382447242736816,
"learning_rate": 0.00015791341376863026,
"loss": 1.1986,
"step": 595
},
{
"epoch": 0.84,
"grad_norm": 0.5621328949928284,
"learning_rate": 0.00015784244144783534,
"loss": 1.1619,
"step": 596
},
{
"epoch": 0.85,
"grad_norm": 0.6134306788444519,
"learning_rate": 0.00015777146912704046,
"loss": 1.1331,
"step": 597
},
{
"epoch": 0.85,
"grad_norm": 0.57140052318573,
"learning_rate": 0.00015770049680624557,
"loss": 1.1858,
"step": 598
},
{
"epoch": 0.85,
"grad_norm": 0.5893669128417969,
"learning_rate": 0.00015762952448545068,
"loss": 1.2289,
"step": 599
},
{
"epoch": 0.85,
"grad_norm": 0.5760409832000732,
"learning_rate": 0.0001575585521646558,
"loss": 1.2426,
"step": 600
},
{
"epoch": 0.85,
"grad_norm": 0.591411828994751,
"learning_rate": 0.0001574875798438609,
"loss": 1.195,
"step": 601
},
{
"epoch": 0.85,
"grad_norm": 0.5461949706077576,
"learning_rate": 0.00015741660752306602,
"loss": 1.1627,
"step": 602
},
{
"epoch": 0.85,
"grad_norm": 0.5686529874801636,
"learning_rate": 0.00015734563520227113,
"loss": 1.1048,
"step": 603
},
{
"epoch": 0.86,
"grad_norm": 0.5683251023292542,
"learning_rate": 0.00015727466288147624,
"loss": 1.2594,
"step": 604
},
{
"epoch": 0.86,
"grad_norm": 0.5748125314712524,
"learning_rate": 0.00015720369056068133,
"loss": 1.151,
"step": 605
},
{
"epoch": 0.86,
"grad_norm": 0.5953657031059265,
"learning_rate": 0.00015713271823988644,
"loss": 1.2148,
"step": 606
},
{
"epoch": 0.86,
"grad_norm": 0.5915418863296509,
"learning_rate": 0.00015706174591909156,
"loss": 1.2249,
"step": 607
},
{
"epoch": 0.86,
"grad_norm": 0.5777379274368286,
"learning_rate": 0.00015699077359829667,
"loss": 1.18,
"step": 608
},
{
"epoch": 0.86,
"grad_norm": 0.592585563659668,
"learning_rate": 0.00015691980127750178,
"loss": 1.2306,
"step": 609
},
{
"epoch": 0.86,
"grad_norm": 0.5442766547203064,
"learning_rate": 0.0001568488289567069,
"loss": 1.1789,
"step": 610
},
{
"epoch": 0.87,
"grad_norm": 0.5644369721412659,
"learning_rate": 0.000156777856635912,
"loss": 1.2501,
"step": 611
},
{
"epoch": 0.87,
"grad_norm": 0.55912846326828,
"learning_rate": 0.00015670688431511712,
"loss": 1.2893,
"step": 612
},
{
"epoch": 0.87,
"grad_norm": 0.5613831877708435,
"learning_rate": 0.0001566359119943222,
"loss": 1.1141,
"step": 613
},
{
"epoch": 0.87,
"grad_norm": 0.5890555381774902,
"learning_rate": 0.00015656493967352732,
"loss": 1.2149,
"step": 614
},
{
"epoch": 0.87,
"grad_norm": 0.5675429105758667,
"learning_rate": 0.00015649396735273246,
"loss": 1.2001,
"step": 615
},
{
"epoch": 0.87,
"grad_norm": 0.5950176119804382,
"learning_rate": 0.00015642299503193757,
"loss": 1.2019,
"step": 616
},
{
"epoch": 0.87,
"grad_norm": 0.5831938982009888,
"learning_rate": 0.00015635202271114266,
"loss": 1.1673,
"step": 617
},
{
"epoch": 0.88,
"grad_norm": 0.5806068181991577,
"learning_rate": 0.00015628105039034777,
"loss": 1.2013,
"step": 618
},
{
"epoch": 0.88,
"grad_norm": 0.5464046001434326,
"learning_rate": 0.00015621007806955288,
"loss": 1.1557,
"step": 619
},
{
"epoch": 0.88,
"grad_norm": 0.6087239980697632,
"learning_rate": 0.000156139105748758,
"loss": 1.2024,
"step": 620
},
{
"epoch": 0.88,
"grad_norm": 0.544928252696991,
"learning_rate": 0.00015606813342796308,
"loss": 1.1968,
"step": 621
},
{
"epoch": 0.88,
"grad_norm": 0.5821595788002014,
"learning_rate": 0.00015599716110716822,
"loss": 1.3009,
"step": 622
},
{
"epoch": 0.88,
"grad_norm": 0.5546013116836548,
"learning_rate": 0.00015592618878637333,
"loss": 1.1127,
"step": 623
},
{
"epoch": 0.88,
"grad_norm": 0.5370335578918457,
"learning_rate": 0.00015585521646557844,
"loss": 1.1543,
"step": 624
},
{
"epoch": 0.89,
"grad_norm": 0.5727035999298096,
"learning_rate": 0.00015578424414478353,
"loss": 1.2239,
"step": 625
},
{
"epoch": 0.89,
"grad_norm": 0.5680972933769226,
"learning_rate": 0.00015571327182398864,
"loss": 1.2741,
"step": 626
},
{
"epoch": 0.89,
"grad_norm": 0.5887457728385925,
"learning_rate": 0.00015564229950319376,
"loss": 1.2175,
"step": 627
},
{
"epoch": 0.89,
"grad_norm": 0.5596421957015991,
"learning_rate": 0.00015557132718239887,
"loss": 1.199,
"step": 628
},
{
"epoch": 0.89,
"grad_norm": 0.5555490851402283,
"learning_rate": 0.00015550035486160398,
"loss": 1.125,
"step": 629
},
{
"epoch": 0.89,
"grad_norm": 0.5999779105186462,
"learning_rate": 0.0001554293825408091,
"loss": 1.2344,
"step": 630
},
{
"epoch": 0.89,
"grad_norm": 0.6120169162750244,
"learning_rate": 0.0001553584102200142,
"loss": 1.1822,
"step": 631
},
{
"epoch": 0.9,
"grad_norm": 0.5653102993965149,
"learning_rate": 0.00015528743789921932,
"loss": 1.2004,
"step": 632
},
{
"epoch": 0.9,
"grad_norm": 0.5853379964828491,
"learning_rate": 0.00015521646557842443,
"loss": 1.1797,
"step": 633
},
{
"epoch": 0.9,
"grad_norm": 0.563293993473053,
"learning_rate": 0.00015514549325762952,
"loss": 1.1991,
"step": 634
},
{
"epoch": 0.9,
"grad_norm": 0.5557974576950073,
"learning_rate": 0.00015507452093683463,
"loss": 1.1843,
"step": 635
},
{
"epoch": 0.9,
"grad_norm": 0.5982331037521362,
"learning_rate": 0.00015500354861603977,
"loss": 1.1827,
"step": 636
},
{
"epoch": 0.9,
"grad_norm": 0.5909957885742188,
"learning_rate": 0.00015493257629524486,
"loss": 1.1338,
"step": 637
},
{
"epoch": 0.9,
"grad_norm": 0.5773521661758423,
"learning_rate": 0.00015486160397444997,
"loss": 1.2313,
"step": 638
},
{
"epoch": 0.91,
"grad_norm": 0.5929623246192932,
"learning_rate": 0.00015479063165365508,
"loss": 1.144,
"step": 639
},
{
"epoch": 0.91,
"grad_norm": 0.5677060484886169,
"learning_rate": 0.0001547196593328602,
"loss": 1.1656,
"step": 640
},
{
"epoch": 0.91,
"grad_norm": 0.552845299243927,
"learning_rate": 0.0001546486870120653,
"loss": 1.1576,
"step": 641
},
{
"epoch": 0.91,
"grad_norm": 0.5725761651992798,
"learning_rate": 0.0001545777146912704,
"loss": 1.1562,
"step": 642
},
{
"epoch": 0.91,
"grad_norm": 0.5661845803260803,
"learning_rate": 0.00015450674237047553,
"loss": 1.1172,
"step": 643
},
{
"epoch": 0.91,
"grad_norm": 0.6051177382469177,
"learning_rate": 0.00015443577004968064,
"loss": 1.2199,
"step": 644
},
{
"epoch": 0.91,
"grad_norm": 0.575823187828064,
"learning_rate": 0.00015436479772888576,
"loss": 1.2077,
"step": 645
},
{
"epoch": 0.92,
"grad_norm": 0.5754451751708984,
"learning_rate": 0.00015429382540809084,
"loss": 1.204,
"step": 646
},
{
"epoch": 0.92,
"grad_norm": 0.5662161111831665,
"learning_rate": 0.00015422285308729596,
"loss": 1.2517,
"step": 647
},
{
"epoch": 0.92,
"grad_norm": 0.62320476770401,
"learning_rate": 0.00015415188076650107,
"loss": 1.2593,
"step": 648
},
{
"epoch": 0.92,
"grad_norm": 0.5987563729286194,
"learning_rate": 0.00015408090844570618,
"loss": 1.1732,
"step": 649
},
{
"epoch": 0.92,
"grad_norm": 0.5595415234565735,
"learning_rate": 0.0001540099361249113,
"loss": 1.2089,
"step": 650
},
{
"epoch": 0.92,
"grad_norm": 0.5797716379165649,
"learning_rate": 0.0001539389638041164,
"loss": 1.143,
"step": 651
},
{
"epoch": 0.92,
"grad_norm": 0.5579701662063599,
"learning_rate": 0.00015386799148332152,
"loss": 1.131,
"step": 652
},
{
"epoch": 0.93,
"grad_norm": 0.6035544276237488,
"learning_rate": 0.00015379701916252663,
"loss": 1.1589,
"step": 653
},
{
"epoch": 0.93,
"grad_norm": 0.6274803876876831,
"learning_rate": 0.00015372604684173172,
"loss": 1.2097,
"step": 654
},
{
"epoch": 0.93,
"grad_norm": 0.5816579461097717,
"learning_rate": 0.00015365507452093683,
"loss": 1.112,
"step": 655
},
{
"epoch": 0.93,
"grad_norm": 0.5723366737365723,
"learning_rate": 0.00015358410220014194,
"loss": 1.1616,
"step": 656
},
{
"epoch": 0.93,
"grad_norm": 0.5701184272766113,
"learning_rate": 0.00015351312987934708,
"loss": 1.1869,
"step": 657
},
{
"epoch": 0.93,
"grad_norm": 0.5675951838493347,
"learning_rate": 0.00015344215755855217,
"loss": 1.1766,
"step": 658
},
{
"epoch": 0.93,
"grad_norm": 0.5601312518119812,
"learning_rate": 0.00015337118523775728,
"loss": 1.1863,
"step": 659
},
{
"epoch": 0.94,
"grad_norm": 0.5662368535995483,
"learning_rate": 0.0001533002129169624,
"loss": 1.2104,
"step": 660
},
{
"epoch": 0.94,
"grad_norm": 0.5629364252090454,
"learning_rate": 0.0001532292405961675,
"loss": 1.172,
"step": 661
},
{
"epoch": 0.94,
"grad_norm": 0.5831137895584106,
"learning_rate": 0.00015315826827537262,
"loss": 1.1191,
"step": 662
},
{
"epoch": 0.94,
"grad_norm": 0.5520037412643433,
"learning_rate": 0.0001530872959545777,
"loss": 1.2139,
"step": 663
},
{
"epoch": 0.94,
"grad_norm": 0.5635483264923096,
"learning_rate": 0.00015301632363378282,
"loss": 1.2279,
"step": 664
},
{
"epoch": 0.94,
"grad_norm": 0.635055422782898,
"learning_rate": 0.00015294535131298796,
"loss": 1.3509,
"step": 665
},
{
"epoch": 0.94,
"grad_norm": 0.5712576508522034,
"learning_rate": 0.00015287437899219304,
"loss": 1.1376,
"step": 666
},
{
"epoch": 0.94,
"grad_norm": 0.6059345602989197,
"learning_rate": 0.00015280340667139816,
"loss": 1.1995,
"step": 667
},
{
"epoch": 0.95,
"grad_norm": 0.5608190298080444,
"learning_rate": 0.00015273243435060327,
"loss": 1.2018,
"step": 668
},
{
"epoch": 0.95,
"grad_norm": 0.583979606628418,
"learning_rate": 0.00015266146202980838,
"loss": 1.2649,
"step": 669
},
{
"epoch": 0.95,
"grad_norm": 0.5718328356742859,
"learning_rate": 0.0001525904897090135,
"loss": 1.2384,
"step": 670
},
{
"epoch": 0.95,
"grad_norm": 0.5428913235664368,
"learning_rate": 0.00015251951738821858,
"loss": 1.2453,
"step": 671
},
{
"epoch": 0.95,
"grad_norm": 0.5979950428009033,
"learning_rate": 0.00015244854506742372,
"loss": 1.191,
"step": 672
},
{
"epoch": 0.95,
"grad_norm": 0.5848691463470459,
"learning_rate": 0.00015237757274662883,
"loss": 1.2098,
"step": 673
},
{
"epoch": 0.95,
"grad_norm": 0.5947878360748291,
"learning_rate": 0.00015230660042583395,
"loss": 1.2174,
"step": 674
},
{
"epoch": 0.96,
"grad_norm": 0.5705249309539795,
"learning_rate": 0.00015223562810503903,
"loss": 1.1701,
"step": 675
},
{
"epoch": 0.96,
"grad_norm": 0.5978186726570129,
"learning_rate": 0.00015216465578424414,
"loss": 1.1468,
"step": 676
},
{
"epoch": 0.96,
"grad_norm": 0.5883394479751587,
"learning_rate": 0.00015209368346344926,
"loss": 1.1998,
"step": 677
},
{
"epoch": 0.96,
"grad_norm": 0.551814079284668,
"learning_rate": 0.00015202271114265437,
"loss": 1.1576,
"step": 678
},
{
"epoch": 0.96,
"grad_norm": 0.5914448499679565,
"learning_rate": 0.00015195173882185948,
"loss": 1.2328,
"step": 679
},
{
"epoch": 0.96,
"grad_norm": 0.6165048480033875,
"learning_rate": 0.0001518807665010646,
"loss": 1.203,
"step": 680
},
{
"epoch": 0.96,
"grad_norm": 0.5738925933837891,
"learning_rate": 0.0001518097941802697,
"loss": 1.1468,
"step": 681
},
{
"epoch": 0.97,
"grad_norm": 0.5782356262207031,
"learning_rate": 0.00015173882185947482,
"loss": 1.165,
"step": 682
},
{
"epoch": 0.97,
"grad_norm": 0.6173332333564758,
"learning_rate": 0.0001516678495386799,
"loss": 1.2598,
"step": 683
},
{
"epoch": 0.97,
"grad_norm": 0.5931529998779297,
"learning_rate": 0.00015159687721788502,
"loss": 1.1117,
"step": 684
},
{
"epoch": 0.97,
"grad_norm": 0.5708393454551697,
"learning_rate": 0.00015152590489709013,
"loss": 1.2371,
"step": 685
},
{
"epoch": 0.97,
"grad_norm": 0.5764403939247131,
"learning_rate": 0.00015145493257629527,
"loss": 1.1984,
"step": 686
},
{
"epoch": 0.97,
"grad_norm": 0.5749918818473816,
"learning_rate": 0.00015138396025550036,
"loss": 1.1347,
"step": 687
},
{
"epoch": 0.97,
"grad_norm": 0.5802225470542908,
"learning_rate": 0.00015131298793470547,
"loss": 1.2307,
"step": 688
},
{
"epoch": 0.98,
"grad_norm": 0.5560474991798401,
"learning_rate": 0.00015124201561391058,
"loss": 1.1978,
"step": 689
},
{
"epoch": 0.98,
"grad_norm": 0.5951694250106812,
"learning_rate": 0.0001511710432931157,
"loss": 1.2,
"step": 690
},
{
"epoch": 0.98,
"grad_norm": 0.5568058490753174,
"learning_rate": 0.0001511000709723208,
"loss": 1.154,
"step": 691
},
{
"epoch": 0.98,
"grad_norm": 0.5962051153182983,
"learning_rate": 0.0001510290986515259,
"loss": 1.1111,
"step": 692
},
{
"epoch": 0.98,
"grad_norm": 0.590822160243988,
"learning_rate": 0.00015095812633073103,
"loss": 1.2304,
"step": 693
},
{
"epoch": 0.98,
"grad_norm": 0.6002994775772095,
"learning_rate": 0.00015088715400993615,
"loss": 1.2463,
"step": 694
},
{
"epoch": 0.98,
"grad_norm": 0.5498103499412537,
"learning_rate": 0.00015081618168914126,
"loss": 1.1797,
"step": 695
},
{
"epoch": 0.99,
"grad_norm": 0.5912280678749084,
"learning_rate": 0.00015074520936834634,
"loss": 1.1483,
"step": 696
},
{
"epoch": 0.99,
"grad_norm": 0.5918639302253723,
"learning_rate": 0.00015067423704755146,
"loss": 1.2741,
"step": 697
},
{
"epoch": 0.99,
"grad_norm": 0.5741661190986633,
"learning_rate": 0.00015060326472675657,
"loss": 1.1968,
"step": 698
},
{
"epoch": 0.99,
"grad_norm": 0.5830884575843811,
"learning_rate": 0.00015053229240596168,
"loss": 1.1663,
"step": 699
},
{
"epoch": 0.99,
"grad_norm": 0.5797367095947266,
"learning_rate": 0.0001504613200851668,
"loss": 1.1406,
"step": 700
},
{
"epoch": 0.99,
"grad_norm": 0.5564558506011963,
"learning_rate": 0.0001503903477643719,
"loss": 1.1447,
"step": 701
},
{
"epoch": 0.99,
"grad_norm": 0.5836336612701416,
"learning_rate": 0.00015031937544357702,
"loss": 1.192,
"step": 702
},
{
"epoch": 1.0,
"grad_norm": 0.6179781556129456,
"learning_rate": 0.00015024840312278213,
"loss": 1.2181,
"step": 703
},
{
"epoch": 1.0,
"grad_norm": 0.6290362477302551,
"learning_rate": 0.00015017743080198722,
"loss": 1.234,
"step": 704
},
{
"epoch": 1.0,
"grad_norm": 0.6224949359893799,
"learning_rate": 0.00015010645848119233,
"loss": 1.2052,
"step": 705
},
{
"epoch": 1.0,
"grad_norm": 0.584284782409668,
"learning_rate": 0.00015003548616039744,
"loss": 1.198,
"step": 706
},
{
"epoch": 1.0,
"grad_norm": 0.5489341020584106,
"learning_rate": 0.00014996451383960258,
"loss": 0.9271,
"step": 707
},
{
"epoch": 1.0,
"grad_norm": 0.5513188242912292,
"learning_rate": 0.00014989354151880767,
"loss": 1.0042,
"step": 708
},
{
"epoch": 1.0,
"grad_norm": 0.5592027306556702,
"learning_rate": 0.00014982256919801278,
"loss": 0.9466,
"step": 709
},
{
"epoch": 1.01,
"grad_norm": 0.6773933172225952,
"learning_rate": 0.0001497515968772179,
"loss": 0.9967,
"step": 710
},
{
"epoch": 1.01,
"grad_norm": 0.7364052534103394,
"learning_rate": 0.000149680624556423,
"loss": 0.9311,
"step": 711
},
{
"epoch": 1.01,
"grad_norm": 0.7685378789901733,
"learning_rate": 0.0001496096522356281,
"loss": 1.0459,
"step": 712
},
{
"epoch": 1.01,
"grad_norm": 0.6515752673149109,
"learning_rate": 0.0001495386799148332,
"loss": 0.9924,
"step": 713
},
{
"epoch": 1.01,
"grad_norm": 0.5922237634658813,
"learning_rate": 0.00014946770759403835,
"loss": 0.9159,
"step": 714
},
{
"epoch": 1.01,
"grad_norm": 0.6064658761024475,
"learning_rate": 0.00014939673527324346,
"loss": 0.9521,
"step": 715
},
{
"epoch": 1.01,
"grad_norm": 0.6219831109046936,
"learning_rate": 0.00014932576295244854,
"loss": 1.0295,
"step": 716
},
{
"epoch": 1.02,
"grad_norm": 0.5930375456809998,
"learning_rate": 0.00014925479063165366,
"loss": 1.0015,
"step": 717
},
{
"epoch": 1.02,
"grad_norm": 0.5987579822540283,
"learning_rate": 0.00014918381831085877,
"loss": 0.937,
"step": 718
},
{
"epoch": 1.02,
"grad_norm": 0.6563482880592346,
"learning_rate": 0.00014911284599006388,
"loss": 0.9396,
"step": 719
},
{
"epoch": 1.02,
"grad_norm": 0.6364894509315491,
"learning_rate": 0.000149041873669269,
"loss": 0.9889,
"step": 720
},
{
"epoch": 1.02,
"grad_norm": 0.6609119772911072,
"learning_rate": 0.0001489709013484741,
"loss": 0.9447,
"step": 721
},
{
"epoch": 1.02,
"grad_norm": 0.6506882309913635,
"learning_rate": 0.00014889992902767922,
"loss": 0.957,
"step": 722
},
{
"epoch": 1.02,
"grad_norm": 0.7169485092163086,
"learning_rate": 0.00014882895670688433,
"loss": 1.0139,
"step": 723
},
{
"epoch": 1.03,
"grad_norm": 0.623009443283081,
"learning_rate": 0.00014875798438608945,
"loss": 0.9973,
"step": 724
},
{
"epoch": 1.03,
"grad_norm": 0.6281269788742065,
"learning_rate": 0.00014868701206529453,
"loss": 0.9378,
"step": 725
},
{
"epoch": 1.03,
"grad_norm": 0.6167576313018799,
"learning_rate": 0.00014861603974449964,
"loss": 1.0196,
"step": 726
},
{
"epoch": 1.03,
"grad_norm": 0.6171091198921204,
"learning_rate": 0.00014854506742370476,
"loss": 0.9586,
"step": 727
},
{
"epoch": 1.03,
"grad_norm": 0.6009536385536194,
"learning_rate": 0.00014847409510290987,
"loss": 0.9412,
"step": 728
},
{
"epoch": 1.03,
"grad_norm": 0.6541954278945923,
"learning_rate": 0.00014840312278211498,
"loss": 0.9836,
"step": 729
},
{
"epoch": 1.03,
"grad_norm": 0.6337497234344482,
"learning_rate": 0.0001483321504613201,
"loss": 0.9975,
"step": 730
},
{
"epoch": 1.04,
"grad_norm": 0.625019907951355,
"learning_rate": 0.0001482611781405252,
"loss": 0.9526,
"step": 731
},
{
"epoch": 1.04,
"grad_norm": 0.6225535273551941,
"learning_rate": 0.00014819020581973032,
"loss": 1.0116,
"step": 732
},
{
"epoch": 1.04,
"grad_norm": 0.6529517769813538,
"learning_rate": 0.0001481192334989354,
"loss": 1.0537,
"step": 733
},
{
"epoch": 1.04,
"grad_norm": 0.6694484949111938,
"learning_rate": 0.00014804826117814052,
"loss": 0.9945,
"step": 734
},
{
"epoch": 1.04,
"grad_norm": 0.6434985995292664,
"learning_rate": 0.00014797728885734566,
"loss": 1.0104,
"step": 735
},
{
"epoch": 1.04,
"grad_norm": 0.643535852432251,
"learning_rate": 0.00014790631653655077,
"loss": 0.9848,
"step": 736
},
{
"epoch": 1.04,
"grad_norm": 0.6169139742851257,
"learning_rate": 0.00014783534421575586,
"loss": 0.8907,
"step": 737
},
{
"epoch": 1.05,
"grad_norm": 0.6531071662902832,
"learning_rate": 0.00014776437189496097,
"loss": 1.0544,
"step": 738
},
{
"epoch": 1.05,
"grad_norm": 0.6137780547142029,
"learning_rate": 0.00014769339957416608,
"loss": 0.919,
"step": 739
},
{
"epoch": 1.05,
"grad_norm": 0.685459554195404,
"learning_rate": 0.0001476224272533712,
"loss": 1.0618,
"step": 740
},
{
"epoch": 1.05,
"grad_norm": 0.6708681583404541,
"learning_rate": 0.00014755145493257628,
"loss": 0.9946,
"step": 741
},
{
"epoch": 1.05,
"grad_norm": 0.6316698789596558,
"learning_rate": 0.00014748048261178142,
"loss": 0.9819,
"step": 742
},
{
"epoch": 1.05,
"grad_norm": 0.622658371925354,
"learning_rate": 0.00014740951029098653,
"loss": 0.9845,
"step": 743
},
{
"epoch": 1.05,
"grad_norm": 0.6192933917045593,
"learning_rate": 0.00014733853797019165,
"loss": 0.9678,
"step": 744
},
{
"epoch": 1.06,
"grad_norm": 0.651993453502655,
"learning_rate": 0.00014726756564939673,
"loss": 0.9554,
"step": 745
},
{
"epoch": 1.06,
"grad_norm": 0.6194689869880676,
"learning_rate": 0.00014719659332860184,
"loss": 0.9167,
"step": 746
},
{
"epoch": 1.06,
"grad_norm": 0.6849738955497742,
"learning_rate": 0.00014712562100780696,
"loss": 0.9528,
"step": 747
},
{
"epoch": 1.06,
"grad_norm": 0.6835150122642517,
"learning_rate": 0.00014705464868701207,
"loss": 0.9799,
"step": 748
},
{
"epoch": 1.06,
"grad_norm": 0.6851332783699036,
"learning_rate": 0.00014698367636621718,
"loss": 1.0472,
"step": 749
},
{
"epoch": 1.06,
"grad_norm": 0.6524817943572998,
"learning_rate": 0.0001469127040454223,
"loss": 0.9258,
"step": 750
},
{
"epoch": 1.06,
"grad_norm": 0.6759579181671143,
"learning_rate": 0.0001468417317246274,
"loss": 0.9545,
"step": 751
},
{
"epoch": 1.07,
"grad_norm": 0.6385304927825928,
"learning_rate": 0.00014677075940383252,
"loss": 0.9751,
"step": 752
},
{
"epoch": 1.07,
"grad_norm": 0.6409848928451538,
"learning_rate": 0.00014669978708303763,
"loss": 0.9349,
"step": 753
},
{
"epoch": 1.07,
"grad_norm": 0.6459067463874817,
"learning_rate": 0.00014662881476224272,
"loss": 1.0341,
"step": 754
},
{
"epoch": 1.07,
"grad_norm": 0.6660894155502319,
"learning_rate": 0.00014655784244144783,
"loss": 1.0151,
"step": 755
},
{
"epoch": 1.07,
"grad_norm": 0.6208553910255432,
"learning_rate": 0.00014648687012065294,
"loss": 0.9547,
"step": 756
},
{
"epoch": 1.07,
"grad_norm": 0.6216368675231934,
"learning_rate": 0.00014641589779985806,
"loss": 1.0178,
"step": 757
},
{
"epoch": 1.07,
"grad_norm": 0.6845688819885254,
"learning_rate": 0.00014634492547906317,
"loss": 0.9976,
"step": 758
},
{
"epoch": 1.08,
"grad_norm": 0.6555722951889038,
"learning_rate": 0.00014627395315826828,
"loss": 0.9684,
"step": 759
},
{
"epoch": 1.08,
"grad_norm": 0.6671058535575867,
"learning_rate": 0.0001462029808374734,
"loss": 0.9691,
"step": 760
},
{
"epoch": 1.08,
"grad_norm": 0.6752671003341675,
"learning_rate": 0.0001461320085166785,
"loss": 0.9492,
"step": 761
},
{
"epoch": 1.08,
"grad_norm": 0.7133360505104065,
"learning_rate": 0.0001460610361958836,
"loss": 0.9867,
"step": 762
},
{
"epoch": 1.08,
"grad_norm": 0.6610892415046692,
"learning_rate": 0.0001459900638750887,
"loss": 0.9809,
"step": 763
},
{
"epoch": 1.08,
"grad_norm": 0.6392022371292114,
"learning_rate": 0.00014591909155429385,
"loss": 0.9485,
"step": 764
},
{
"epoch": 1.08,
"grad_norm": 0.6533021330833435,
"learning_rate": 0.00014584811923349896,
"loss": 1.0081,
"step": 765
},
{
"epoch": 1.09,
"grad_norm": 0.6720760464668274,
"learning_rate": 0.00014577714691270404,
"loss": 0.9421,
"step": 766
},
{
"epoch": 1.09,
"grad_norm": 0.6587719321250916,
"learning_rate": 0.00014570617459190916,
"loss": 0.953,
"step": 767
},
{
"epoch": 1.09,
"grad_norm": 0.6704784631729126,
"learning_rate": 0.00014563520227111427,
"loss": 0.9633,
"step": 768
},
{
"epoch": 1.09,
"grad_norm": 0.6440714597702026,
"learning_rate": 0.00014556422995031938,
"loss": 0.9655,
"step": 769
},
{
"epoch": 1.09,
"grad_norm": 0.6508157849311829,
"learning_rate": 0.00014549325762952447,
"loss": 0.9552,
"step": 770
},
{
"epoch": 1.09,
"grad_norm": 0.6560282707214355,
"learning_rate": 0.0001454222853087296,
"loss": 1.0196,
"step": 771
},
{
"epoch": 1.09,
"grad_norm": 0.6468494534492493,
"learning_rate": 0.00014535131298793472,
"loss": 0.9734,
"step": 772
},
{
"epoch": 1.1,
"grad_norm": 0.6644648313522339,
"learning_rate": 0.00014528034066713983,
"loss": 1.011,
"step": 773
},
{
"epoch": 1.1,
"grad_norm": 0.6273561120033264,
"learning_rate": 0.00014520936834634492,
"loss": 0.9456,
"step": 774
},
{
"epoch": 1.1,
"grad_norm": 0.6624634265899658,
"learning_rate": 0.00014513839602555003,
"loss": 1.0313,
"step": 775
},
{
"epoch": 1.1,
"grad_norm": 0.6488678455352783,
"learning_rate": 0.00014506742370475515,
"loss": 0.9081,
"step": 776
},
{
"epoch": 1.1,
"grad_norm": 0.6563031077384949,
"learning_rate": 0.00014499645138396026,
"loss": 0.9934,
"step": 777
},
{
"epoch": 1.1,
"grad_norm": 0.6557429432868958,
"learning_rate": 0.00014492547906316537,
"loss": 0.9944,
"step": 778
},
{
"epoch": 1.1,
"grad_norm": 0.6796723008155823,
"learning_rate": 0.00014485450674237048,
"loss": 0.9943,
"step": 779
},
{
"epoch": 1.11,
"grad_norm": 0.6730267405509949,
"learning_rate": 0.0001447835344215756,
"loss": 0.974,
"step": 780
},
{
"epoch": 1.11,
"grad_norm": 0.670569658279419,
"learning_rate": 0.0001447125621007807,
"loss": 0.9909,
"step": 781
},
{
"epoch": 1.11,
"grad_norm": 0.6504651308059692,
"learning_rate": 0.00014464158977998582,
"loss": 1.0146,
"step": 782
},
{
"epoch": 1.11,
"grad_norm": 0.6637928485870361,
"learning_rate": 0.0001445706174591909,
"loss": 0.9802,
"step": 783
},
{
"epoch": 1.11,
"grad_norm": 0.6541115045547485,
"learning_rate": 0.00014449964513839602,
"loss": 1.017,
"step": 784
},
{
"epoch": 1.11,
"grad_norm": 0.6643988490104675,
"learning_rate": 0.00014442867281760116,
"loss": 0.9983,
"step": 785
},
{
"epoch": 1.11,
"grad_norm": 0.6713914275169373,
"learning_rate": 0.00014435770049680625,
"loss": 0.9617,
"step": 786
},
{
"epoch": 1.11,
"grad_norm": 0.6338568329811096,
"learning_rate": 0.00014428672817601136,
"loss": 0.8769,
"step": 787
},
{
"epoch": 1.12,
"grad_norm": 0.6811724305152893,
"learning_rate": 0.00014421575585521647,
"loss": 1.0026,
"step": 788
},
{
"epoch": 1.12,
"grad_norm": 0.7284192442893982,
"learning_rate": 0.00014414478353442158,
"loss": 0.9643,
"step": 789
},
{
"epoch": 1.12,
"grad_norm": 0.6587553024291992,
"learning_rate": 0.0001440738112136267,
"loss": 0.9988,
"step": 790
},
{
"epoch": 1.12,
"grad_norm": 0.6484501957893372,
"learning_rate": 0.00014400283889283178,
"loss": 0.9761,
"step": 791
},
{
"epoch": 1.12,
"grad_norm": 0.6753117442131042,
"learning_rate": 0.00014393186657203692,
"loss": 1.0066,
"step": 792
},
{
"epoch": 1.12,
"grad_norm": 0.6668440699577332,
"learning_rate": 0.00014386089425124203,
"loss": 0.9439,
"step": 793
},
{
"epoch": 1.12,
"grad_norm": 0.6919518709182739,
"learning_rate": 0.00014378992193044715,
"loss": 0.9829,
"step": 794
},
{
"epoch": 1.13,
"grad_norm": 0.6882046461105347,
"learning_rate": 0.00014371894960965223,
"loss": 1.0086,
"step": 795
},
{
"epoch": 1.13,
"grad_norm": 0.641126811504364,
"learning_rate": 0.00014364797728885735,
"loss": 0.9792,
"step": 796
},
{
"epoch": 1.13,
"grad_norm": 0.6557233333587646,
"learning_rate": 0.00014357700496806246,
"loss": 0.8754,
"step": 797
},
{
"epoch": 1.13,
"grad_norm": 0.6546401381492615,
"learning_rate": 0.00014350603264726757,
"loss": 0.9407,
"step": 798
},
{
"epoch": 1.13,
"grad_norm": 0.6610774397850037,
"learning_rate": 0.00014343506032647268,
"loss": 1.0165,
"step": 799
},
{
"epoch": 1.13,
"grad_norm": 0.692677915096283,
"learning_rate": 0.0001433640880056778,
"loss": 0.931,
"step": 800
},
{
"epoch": 1.13,
"grad_norm": 0.6578258872032166,
"learning_rate": 0.0001432931156848829,
"loss": 0.9976,
"step": 801
},
{
"epoch": 1.14,
"grad_norm": 0.6596624851226807,
"learning_rate": 0.00014322214336408802,
"loss": 0.9312,
"step": 802
},
{
"epoch": 1.14,
"grad_norm": 0.6386009454727173,
"learning_rate": 0.0001431511710432931,
"loss": 0.992,
"step": 803
},
{
"epoch": 1.14,
"grad_norm": 0.6921107769012451,
"learning_rate": 0.00014308019872249822,
"loss": 0.9981,
"step": 804
},
{
"epoch": 1.14,
"grad_norm": 0.6416789293289185,
"learning_rate": 0.00014300922640170333,
"loss": 1.0192,
"step": 805
},
{
"epoch": 1.14,
"grad_norm": 0.6847473382949829,
"learning_rate": 0.00014293825408090847,
"loss": 0.945,
"step": 806
},
{
"epoch": 1.14,
"grad_norm": 0.6717972755432129,
"learning_rate": 0.00014286728176011356,
"loss": 0.9767,
"step": 807
},
{
"epoch": 1.14,
"grad_norm": 0.6813983917236328,
"learning_rate": 0.00014279630943931867,
"loss": 0.9056,
"step": 808
},
{
"epoch": 1.15,
"grad_norm": 0.6662610769271851,
"learning_rate": 0.00014272533711852378,
"loss": 1.1045,
"step": 809
},
{
"epoch": 1.15,
"grad_norm": 0.6779379844665527,
"learning_rate": 0.0001426543647977289,
"loss": 0.9158,
"step": 810
},
{
"epoch": 1.15,
"grad_norm": 0.6368980407714844,
"learning_rate": 0.000142583392476934,
"loss": 0.8638,
"step": 811
},
{
"epoch": 1.15,
"grad_norm": 0.648033857345581,
"learning_rate": 0.0001425124201561391,
"loss": 1.0107,
"step": 812
},
{
"epoch": 1.15,
"grad_norm": 0.6696658134460449,
"learning_rate": 0.00014244144783534423,
"loss": 0.9926,
"step": 813
},
{
"epoch": 1.15,
"grad_norm": 0.6661670804023743,
"learning_rate": 0.00014237047551454935,
"loss": 0.9805,
"step": 814
},
{
"epoch": 1.15,
"grad_norm": 0.6772279739379883,
"learning_rate": 0.00014229950319375443,
"loss": 0.9443,
"step": 815
},
{
"epoch": 1.16,
"grad_norm": 0.7234826683998108,
"learning_rate": 0.00014222853087295955,
"loss": 1.0605,
"step": 816
},
{
"epoch": 1.16,
"grad_norm": 0.6951813101768494,
"learning_rate": 0.00014215755855216466,
"loss": 1.0043,
"step": 817
},
{
"epoch": 1.16,
"grad_norm": 0.6815015077590942,
"learning_rate": 0.00014208658623136977,
"loss": 0.9496,
"step": 818
},
{
"epoch": 1.16,
"grad_norm": 0.6754716634750366,
"learning_rate": 0.00014201561391057488,
"loss": 0.954,
"step": 819
},
{
"epoch": 1.16,
"grad_norm": 0.6836742758750916,
"learning_rate": 0.00014194464158978,
"loss": 0.9787,
"step": 820
},
{
"epoch": 1.16,
"grad_norm": 0.6898475885391235,
"learning_rate": 0.0001418736692689851,
"loss": 1.0429,
"step": 821
},
{
"epoch": 1.16,
"grad_norm": 0.6403341889381409,
"learning_rate": 0.00014180269694819022,
"loss": 0.9681,
"step": 822
},
{
"epoch": 1.17,
"grad_norm": 0.6508347392082214,
"learning_rate": 0.00014173172462739533,
"loss": 1.0251,
"step": 823
},
{
"epoch": 1.17,
"grad_norm": 0.6511325836181641,
"learning_rate": 0.00014166075230660042,
"loss": 0.934,
"step": 824
},
{
"epoch": 1.17,
"grad_norm": 0.6893059611320496,
"learning_rate": 0.00014158977998580553,
"loss": 0.9481,
"step": 825
},
{
"epoch": 1.17,
"grad_norm": 0.7065815329551697,
"learning_rate": 0.00014151880766501065,
"loss": 0.9531,
"step": 826
},
{
"epoch": 1.17,
"grad_norm": 0.6699268221855164,
"learning_rate": 0.00014144783534421579,
"loss": 0.9593,
"step": 827
},
{
"epoch": 1.17,
"grad_norm": 0.691835343837738,
"learning_rate": 0.00014137686302342087,
"loss": 1.0152,
"step": 828
},
{
"epoch": 1.17,
"grad_norm": 0.6948768496513367,
"learning_rate": 0.00014130589070262598,
"loss": 0.9809,
"step": 829
},
{
"epoch": 1.18,
"grad_norm": 0.685308039188385,
"learning_rate": 0.0001412349183818311,
"loss": 0.9709,
"step": 830
},
{
"epoch": 1.18,
"grad_norm": 0.7141802310943604,
"learning_rate": 0.0001411639460610362,
"loss": 1.0286,
"step": 831
},
{
"epoch": 1.18,
"grad_norm": 0.6938399076461792,
"learning_rate": 0.0001410929737402413,
"loss": 1.0431,
"step": 832
},
{
"epoch": 1.18,
"grad_norm": 0.6528292894363403,
"learning_rate": 0.0001410220014194464,
"loss": 0.9173,
"step": 833
},
{
"epoch": 1.18,
"grad_norm": 0.675507664680481,
"learning_rate": 0.00014095102909865155,
"loss": 0.958,
"step": 834
},
{
"epoch": 1.18,
"grad_norm": 0.668474555015564,
"learning_rate": 0.00014088005677785666,
"loss": 1.0094,
"step": 835
},
{
"epoch": 1.18,
"grad_norm": 0.6946351528167725,
"learning_rate": 0.00014080908445706175,
"loss": 0.986,
"step": 836
},
{
"epoch": 1.19,
"grad_norm": 0.701863706111908,
"learning_rate": 0.00014073811213626686,
"loss": 0.9987,
"step": 837
},
{
"epoch": 1.19,
"grad_norm": 0.722443163394928,
"learning_rate": 0.00014066713981547197,
"loss": 0.9995,
"step": 838
},
{
"epoch": 1.19,
"grad_norm": 0.675977885723114,
"learning_rate": 0.00014059616749467708,
"loss": 0.9929,
"step": 839
},
{
"epoch": 1.19,
"grad_norm": 0.7431498765945435,
"learning_rate": 0.0001405251951738822,
"loss": 1.0413,
"step": 840
},
{
"epoch": 1.19,
"grad_norm": 0.6718546152114868,
"learning_rate": 0.00014045422285308728,
"loss": 0.9265,
"step": 841
},
{
"epoch": 1.19,
"grad_norm": 0.6670490503311157,
"learning_rate": 0.00014038325053229242,
"loss": 0.9314,
"step": 842
},
{
"epoch": 1.19,
"grad_norm": 0.6900096535682678,
"learning_rate": 0.00014031227821149754,
"loss": 1.0211,
"step": 843
},
{
"epoch": 1.2,
"grad_norm": 0.6813675761222839,
"learning_rate": 0.00014024130589070265,
"loss": 1.0462,
"step": 844
},
{
"epoch": 1.2,
"grad_norm": 0.6931719183921814,
"learning_rate": 0.00014017033356990773,
"loss": 0.9741,
"step": 845
},
{
"epoch": 1.2,
"grad_norm": 0.683791995048523,
"learning_rate": 0.00014009936124911285,
"loss": 1.054,
"step": 846
},
{
"epoch": 1.2,
"grad_norm": 0.7232967615127563,
"learning_rate": 0.00014002838892831796,
"loss": 0.9365,
"step": 847
},
{
"epoch": 1.2,
"grad_norm": 0.6782677173614502,
"learning_rate": 0.00013995741660752307,
"loss": 0.9311,
"step": 848
},
{
"epoch": 1.2,
"grad_norm": 0.6695821285247803,
"learning_rate": 0.00013988644428672818,
"loss": 0.993,
"step": 849
},
{
"epoch": 1.2,
"grad_norm": 0.6955466866493225,
"learning_rate": 0.0001398154719659333,
"loss": 0.985,
"step": 850
},
{
"epoch": 1.21,
"grad_norm": 0.692164421081543,
"learning_rate": 0.0001397444996451384,
"loss": 0.9836,
"step": 851
},
{
"epoch": 1.21,
"grad_norm": 0.6746982932090759,
"learning_rate": 0.00013967352732434352,
"loss": 0.9096,
"step": 852
},
{
"epoch": 1.21,
"grad_norm": 0.6445276141166687,
"learning_rate": 0.0001396025550035486,
"loss": 1.0032,
"step": 853
},
{
"epoch": 1.21,
"grad_norm": 0.7133185863494873,
"learning_rate": 0.00013953158268275372,
"loss": 1.009,
"step": 854
},
{
"epoch": 1.21,
"grad_norm": 0.6872855424880981,
"learning_rate": 0.00013946061036195883,
"loss": 1.0236,
"step": 855
},
{
"epoch": 1.21,
"grad_norm": 0.7020975351333618,
"learning_rate": 0.00013938963804116397,
"loss": 1.0142,
"step": 856
},
{
"epoch": 1.21,
"grad_norm": 0.663386881351471,
"learning_rate": 0.00013931866572036906,
"loss": 0.9976,
"step": 857
},
{
"epoch": 1.22,
"grad_norm": 0.7051934599876404,
"learning_rate": 0.00013924769339957417,
"loss": 1.0646,
"step": 858
},
{
"epoch": 1.22,
"grad_norm": 0.6661969423294067,
"learning_rate": 0.00013917672107877928,
"loss": 0.9287,
"step": 859
},
{
"epoch": 1.22,
"grad_norm": 0.6907512545585632,
"learning_rate": 0.0001391057487579844,
"loss": 1.0021,
"step": 860
},
{
"epoch": 1.22,
"grad_norm": 0.66982501745224,
"learning_rate": 0.00013903477643718948,
"loss": 0.9768,
"step": 861
},
{
"epoch": 1.22,
"grad_norm": 0.6967639923095703,
"learning_rate": 0.0001389638041163946,
"loss": 0.994,
"step": 862
},
{
"epoch": 1.22,
"grad_norm": 0.7178871035575867,
"learning_rate": 0.00013889283179559974,
"loss": 0.9517,
"step": 863
},
{
"epoch": 1.22,
"grad_norm": 0.6952171921730042,
"learning_rate": 0.00013882185947480485,
"loss": 1.0229,
"step": 864
},
{
"epoch": 1.23,
"grad_norm": 0.6783744096755981,
"learning_rate": 0.00013875088715400993,
"loss": 0.9575,
"step": 865
},
{
"epoch": 1.23,
"grad_norm": 0.6978974938392639,
"learning_rate": 0.00013867991483321505,
"loss": 0.9912,
"step": 866
},
{
"epoch": 1.23,
"grad_norm": 0.6688209772109985,
"learning_rate": 0.00013860894251242016,
"loss": 0.9916,
"step": 867
},
{
"epoch": 1.23,
"grad_norm": 0.6768879890441895,
"learning_rate": 0.00013853797019162527,
"loss": 1.0178,
"step": 868
},
{
"epoch": 1.23,
"grad_norm": 0.6849448084831238,
"learning_rate": 0.00013846699787083038,
"loss": 1.003,
"step": 869
},
{
"epoch": 1.23,
"grad_norm": 0.6396516561508179,
"learning_rate": 0.0001383960255500355,
"loss": 0.8896,
"step": 870
},
{
"epoch": 1.23,
"grad_norm": 0.6769018769264221,
"learning_rate": 0.0001383250532292406,
"loss": 1.0025,
"step": 871
},
{
"epoch": 1.24,
"grad_norm": 0.7078860998153687,
"learning_rate": 0.00013825408090844572,
"loss": 0.9968,
"step": 872
},
{
"epoch": 1.24,
"grad_norm": 0.6808391213417053,
"learning_rate": 0.00013818310858765084,
"loss": 0.9811,
"step": 873
},
{
"epoch": 1.24,
"grad_norm": 0.7080936431884766,
"learning_rate": 0.00013811213626685592,
"loss": 0.9996,
"step": 874
},
{
"epoch": 1.24,
"grad_norm": 0.7115461230278015,
"learning_rate": 0.00013804116394606103,
"loss": 0.9898,
"step": 875
},
{
"epoch": 1.24,
"grad_norm": 0.6735612154006958,
"learning_rate": 0.00013797019162526615,
"loss": 0.9852,
"step": 876
},
{
"epoch": 1.24,
"grad_norm": 0.7107511162757874,
"learning_rate": 0.00013789921930447126,
"loss": 1.0363,
"step": 877
},
{
"epoch": 1.24,
"grad_norm": 0.7112343311309814,
"learning_rate": 0.00013782824698367637,
"loss": 1.0529,
"step": 878
},
{
"epoch": 1.25,
"grad_norm": 0.6874762773513794,
"learning_rate": 0.00013775727466288148,
"loss": 0.9774,
"step": 879
},
{
"epoch": 1.25,
"grad_norm": 0.6808403134346008,
"learning_rate": 0.0001376863023420866,
"loss": 1.0055,
"step": 880
},
{
"epoch": 1.25,
"grad_norm": 0.700853705406189,
"learning_rate": 0.0001376153300212917,
"loss": 1.0177,
"step": 881
},
{
"epoch": 1.25,
"grad_norm": 0.7160459160804749,
"learning_rate": 0.0001375443577004968,
"loss": 0.9944,
"step": 882
},
{
"epoch": 1.25,
"grad_norm": 0.7002721428871155,
"learning_rate": 0.0001374733853797019,
"loss": 0.9271,
"step": 883
},
{
"epoch": 1.25,
"grad_norm": 0.7237516045570374,
"learning_rate": 0.00013740241305890705,
"loss": 0.9932,
"step": 884
},
{
"epoch": 1.25,
"grad_norm": 0.7104388475418091,
"learning_rate": 0.00013733144073811216,
"loss": 1.044,
"step": 885
},
{
"epoch": 1.26,
"grad_norm": 0.6789724826812744,
"learning_rate": 0.00013726046841731725,
"loss": 0.9781,
"step": 886
},
{
"epoch": 1.26,
"grad_norm": 0.6558204293251038,
"learning_rate": 0.00013718949609652236,
"loss": 0.9708,
"step": 887
},
{
"epoch": 1.26,
"grad_norm": 0.6882026791572571,
"learning_rate": 0.00013711852377572747,
"loss": 0.9466,
"step": 888
},
{
"epoch": 1.26,
"grad_norm": 0.6949423551559448,
"learning_rate": 0.00013704755145493258,
"loss": 1.0056,
"step": 889
},
{
"epoch": 1.26,
"grad_norm": 0.7023829221725464,
"learning_rate": 0.00013697657913413767,
"loss": 0.982,
"step": 890
},
{
"epoch": 1.26,
"grad_norm": 0.7513879537582397,
"learning_rate": 0.0001369056068133428,
"loss": 0.987,
"step": 891
},
{
"epoch": 1.26,
"grad_norm": 0.6762917041778564,
"learning_rate": 0.00013683463449254792,
"loss": 0.9202,
"step": 892
},
{
"epoch": 1.27,
"grad_norm": 0.7018841505050659,
"learning_rate": 0.00013676366217175304,
"loss": 1.0525,
"step": 893
},
{
"epoch": 1.27,
"grad_norm": 0.6917586922645569,
"learning_rate": 0.00013669268985095812,
"loss": 0.9392,
"step": 894
},
{
"epoch": 1.27,
"grad_norm": 0.7120482325553894,
"learning_rate": 0.00013662171753016323,
"loss": 1.0218,
"step": 895
},
{
"epoch": 1.27,
"grad_norm": 0.6614890098571777,
"learning_rate": 0.00013655074520936835,
"loss": 0.9586,
"step": 896
},
{
"epoch": 1.27,
"grad_norm": 0.713033139705658,
"learning_rate": 0.00013647977288857346,
"loss": 0.9867,
"step": 897
},
{
"epoch": 1.27,
"grad_norm": 0.6614014506340027,
"learning_rate": 0.00013640880056777857,
"loss": 0.9747,
"step": 898
},
{
"epoch": 1.27,
"grad_norm": 0.6631972193717957,
"learning_rate": 0.00013633782824698369,
"loss": 1.0244,
"step": 899
},
{
"epoch": 1.28,
"grad_norm": 0.6855646967887878,
"learning_rate": 0.0001362668559261888,
"loss": 1.0053,
"step": 900
},
{
"epoch": 1.28,
"grad_norm": 0.6771067976951599,
"learning_rate": 0.0001361958836053939,
"loss": 0.956,
"step": 901
},
{
"epoch": 1.28,
"grad_norm": 0.7038958668708801,
"learning_rate": 0.00013612491128459902,
"loss": 1.0221,
"step": 902
},
{
"epoch": 1.28,
"grad_norm": 0.7114011645317078,
"learning_rate": 0.0001360539389638041,
"loss": 1.0298,
"step": 903
},
{
"epoch": 1.28,
"grad_norm": 0.707399308681488,
"learning_rate": 0.00013598296664300922,
"loss": 0.9623,
"step": 904
},
{
"epoch": 1.28,
"grad_norm": 0.7122653722763062,
"learning_rate": 0.00013591199432221436,
"loss": 1.0042,
"step": 905
},
{
"epoch": 1.28,
"grad_norm": 0.699234664440155,
"learning_rate": 0.00013584102200141945,
"loss": 0.9806,
"step": 906
},
{
"epoch": 1.28,
"grad_norm": 0.7016271352767944,
"learning_rate": 0.00013577004968062456,
"loss": 0.9817,
"step": 907
},
{
"epoch": 1.29,
"grad_norm": 0.7013638019561768,
"learning_rate": 0.00013569907735982967,
"loss": 1.0045,
"step": 908
},
{
"epoch": 1.29,
"grad_norm": 0.7025014162063599,
"learning_rate": 0.00013562810503903479,
"loss": 1.0147,
"step": 909
},
{
"epoch": 1.29,
"grad_norm": 0.718924880027771,
"learning_rate": 0.0001355571327182399,
"loss": 1.0292,
"step": 910
},
{
"epoch": 1.29,
"grad_norm": 0.6805405020713806,
"learning_rate": 0.00013548616039744498,
"loss": 0.9774,
"step": 911
},
{
"epoch": 1.29,
"grad_norm": 0.6721410155296326,
"learning_rate": 0.00013541518807665012,
"loss": 0.9475,
"step": 912
},
{
"epoch": 1.29,
"grad_norm": 0.6628838181495667,
"learning_rate": 0.00013534421575585524,
"loss": 0.9408,
"step": 913
},
{
"epoch": 1.29,
"grad_norm": 0.7161340713500977,
"learning_rate": 0.00013527324343506035,
"loss": 0.9819,
"step": 914
},
{
"epoch": 1.3,
"grad_norm": 0.7012698650360107,
"learning_rate": 0.00013520227111426543,
"loss": 1.0074,
"step": 915
},
{
"epoch": 1.3,
"grad_norm": 0.6986689567565918,
"learning_rate": 0.00013513129879347055,
"loss": 0.9736,
"step": 916
},
{
"epoch": 1.3,
"grad_norm": 0.7003822922706604,
"learning_rate": 0.00013506032647267566,
"loss": 1.0369,
"step": 917
},
{
"epoch": 1.3,
"grad_norm": 0.723176896572113,
"learning_rate": 0.00013498935415188077,
"loss": 0.9936,
"step": 918
},
{
"epoch": 1.3,
"grad_norm": 0.7104766368865967,
"learning_rate": 0.00013491838183108589,
"loss": 0.9297,
"step": 919
},
{
"epoch": 1.3,
"grad_norm": 0.7126396894454956,
"learning_rate": 0.000134847409510291,
"loss": 1.0051,
"step": 920
},
{
"epoch": 1.3,
"grad_norm": 0.6854078769683838,
"learning_rate": 0.0001347764371894961,
"loss": 0.8957,
"step": 921
},
{
"epoch": 1.31,
"grad_norm": 0.7038775086402893,
"learning_rate": 0.00013470546486870122,
"loss": 1.0806,
"step": 922
},
{
"epoch": 1.31,
"grad_norm": 0.6905360817909241,
"learning_rate": 0.0001346344925479063,
"loss": 1.0533,
"step": 923
},
{
"epoch": 1.31,
"grad_norm": 0.6846379637718201,
"learning_rate": 0.00013456352022711142,
"loss": 0.9419,
"step": 924
},
{
"epoch": 1.31,
"grad_norm": 0.6838957667350769,
"learning_rate": 0.00013449254790631653,
"loss": 1.0041,
"step": 925
},
{
"epoch": 1.31,
"grad_norm": 0.6939088106155396,
"learning_rate": 0.00013442157558552167,
"loss": 1.0256,
"step": 926
},
{
"epoch": 1.31,
"grad_norm": 0.705324649810791,
"learning_rate": 0.00013435060326472676,
"loss": 1.0321,
"step": 927
},
{
"epoch": 1.31,
"grad_norm": 0.680023729801178,
"learning_rate": 0.00013427963094393187,
"loss": 0.9367,
"step": 928
},
{
"epoch": 1.32,
"grad_norm": 0.6934013962745667,
"learning_rate": 0.00013420865862313699,
"loss": 0.9717,
"step": 929
},
{
"epoch": 1.32,
"grad_norm": 0.6807674765586853,
"learning_rate": 0.0001341376863023421,
"loss": 0.9661,
"step": 930
},
{
"epoch": 1.32,
"grad_norm": 0.695918083190918,
"learning_rate": 0.0001340667139815472,
"loss": 0.9762,
"step": 931
},
{
"epoch": 1.32,
"grad_norm": 0.6872202754020691,
"learning_rate": 0.0001339957416607523,
"loss": 0.9487,
"step": 932
},
{
"epoch": 1.32,
"grad_norm": 0.7057836651802063,
"learning_rate": 0.0001339247693399574,
"loss": 0.9665,
"step": 933
},
{
"epoch": 1.32,
"grad_norm": 0.688558042049408,
"learning_rate": 0.00013385379701916255,
"loss": 0.9639,
"step": 934
},
{
"epoch": 1.32,
"grad_norm": 0.7189505100250244,
"learning_rate": 0.00013378282469836763,
"loss": 1.0014,
"step": 935
},
{
"epoch": 1.33,
"grad_norm": 0.6903408169746399,
"learning_rate": 0.00013371185237757275,
"loss": 1.0319,
"step": 936
},
{
"epoch": 1.33,
"grad_norm": 0.6907972097396851,
"learning_rate": 0.00013364088005677786,
"loss": 0.9705,
"step": 937
},
{
"epoch": 1.33,
"grad_norm": 0.7174811363220215,
"learning_rate": 0.00013356990773598297,
"loss": 0.9992,
"step": 938
},
{
"epoch": 1.33,
"grad_norm": 0.6912071108818054,
"learning_rate": 0.00013349893541518809,
"loss": 0.9241,
"step": 939
},
{
"epoch": 1.33,
"grad_norm": 0.7143810391426086,
"learning_rate": 0.00013342796309439317,
"loss": 1.0302,
"step": 940
},
{
"epoch": 1.33,
"grad_norm": 0.6805667281150818,
"learning_rate": 0.0001333569907735983,
"loss": 0.9711,
"step": 941
},
{
"epoch": 1.33,
"grad_norm": 0.6745904088020325,
"learning_rate": 0.00013328601845280342,
"loss": 0.9788,
"step": 942
},
{
"epoch": 1.34,
"grad_norm": 0.7004431486129761,
"learning_rate": 0.00013321504613200854,
"loss": 0.913,
"step": 943
},
{
"epoch": 1.34,
"grad_norm": 0.709544837474823,
"learning_rate": 0.00013314407381121362,
"loss": 1.0007,
"step": 944
},
{
"epoch": 1.34,
"grad_norm": 0.7254632115364075,
"learning_rate": 0.00013307310149041873,
"loss": 0.9366,
"step": 945
},
{
"epoch": 1.34,
"grad_norm": 0.7147793173789978,
"learning_rate": 0.00013300212916962385,
"loss": 0.9834,
"step": 946
},
{
"epoch": 1.34,
"grad_norm": 0.733207106590271,
"learning_rate": 0.00013293115684882896,
"loss": 1.0158,
"step": 947
},
{
"epoch": 1.34,
"grad_norm": 0.7627166509628296,
"learning_rate": 0.00013286018452803407,
"loss": 1.0266,
"step": 948
},
{
"epoch": 1.34,
"grad_norm": 0.7074815034866333,
"learning_rate": 0.00013278921220723919,
"loss": 1.012,
"step": 949
},
{
"epoch": 1.35,
"grad_norm": 0.7292148470878601,
"learning_rate": 0.0001327182398864443,
"loss": 1.0539,
"step": 950
},
{
"epoch": 1.35,
"grad_norm": 0.6858687400817871,
"learning_rate": 0.0001326472675656494,
"loss": 1.0065,
"step": 951
},
{
"epoch": 1.35,
"grad_norm": 0.6861886978149414,
"learning_rate": 0.0001325762952448545,
"loss": 0.9988,
"step": 952
},
{
"epoch": 1.35,
"grad_norm": 0.6918312311172485,
"learning_rate": 0.0001325053229240596,
"loss": 1.0447,
"step": 953
},
{
"epoch": 1.35,
"grad_norm": 0.6733079552650452,
"learning_rate": 0.00013243435060326472,
"loss": 0.9746,
"step": 954
},
{
"epoch": 1.35,
"grad_norm": 0.7106968760490417,
"learning_rate": 0.00013236337828246986,
"loss": 1.0219,
"step": 955
},
{
"epoch": 1.35,
"grad_norm": 0.7458065748214722,
"learning_rate": 0.00013229240596167495,
"loss": 0.9056,
"step": 956
},
{
"epoch": 1.36,
"grad_norm": 0.7352179884910583,
"learning_rate": 0.00013222143364088006,
"loss": 0.9863,
"step": 957
},
{
"epoch": 1.36,
"grad_norm": 0.6810528635978699,
"learning_rate": 0.00013215046132008517,
"loss": 0.9686,
"step": 958
},
{
"epoch": 1.36,
"grad_norm": 0.6850380301475525,
"learning_rate": 0.00013207948899929029,
"loss": 0.9124,
"step": 959
},
{
"epoch": 1.36,
"grad_norm": 0.7115997672080994,
"learning_rate": 0.0001320085166784954,
"loss": 0.9862,
"step": 960
},
{
"epoch": 1.36,
"grad_norm": 0.7444871664047241,
"learning_rate": 0.00013193754435770048,
"loss": 0.9958,
"step": 961
},
{
"epoch": 1.36,
"grad_norm": 0.7253167629241943,
"learning_rate": 0.00013186657203690562,
"loss": 0.9751,
"step": 962
},
{
"epoch": 1.36,
"grad_norm": 0.7287327647209167,
"learning_rate": 0.00013179559971611074,
"loss": 0.9621,
"step": 963
},
{
"epoch": 1.37,
"grad_norm": 0.7266460061073303,
"learning_rate": 0.00013172462739531582,
"loss": 1.0085,
"step": 964
},
{
"epoch": 1.37,
"grad_norm": 0.6825550198554993,
"learning_rate": 0.00013165365507452094,
"loss": 0.9696,
"step": 965
},
{
"epoch": 1.37,
"grad_norm": 0.7191211581230164,
"learning_rate": 0.00013158268275372605,
"loss": 1.0167,
"step": 966
},
{
"epoch": 1.37,
"grad_norm": 0.6877723336219788,
"learning_rate": 0.00013151171043293116,
"loss": 0.9224,
"step": 967
},
{
"epoch": 1.37,
"grad_norm": 0.6919003129005432,
"learning_rate": 0.00013144073811213627,
"loss": 0.9628,
"step": 968
},
{
"epoch": 1.37,
"grad_norm": 0.7358181476593018,
"learning_rate": 0.00013136976579134139,
"loss": 1.0684,
"step": 969
},
{
"epoch": 1.37,
"grad_norm": 0.7027356624603271,
"learning_rate": 0.0001312987934705465,
"loss": 1.0333,
"step": 970
},
{
"epoch": 1.38,
"grad_norm": 0.6980712413787842,
"learning_rate": 0.0001312278211497516,
"loss": 0.9823,
"step": 971
},
{
"epoch": 1.38,
"grad_norm": 0.7023440599441528,
"learning_rate": 0.00013115684882895672,
"loss": 0.9497,
"step": 972
},
{
"epoch": 1.38,
"grad_norm": 0.7038329839706421,
"learning_rate": 0.0001310858765081618,
"loss": 0.9915,
"step": 973
},
{
"epoch": 1.38,
"grad_norm": 0.7077039480209351,
"learning_rate": 0.00013101490418736692,
"loss": 0.947,
"step": 974
},
{
"epoch": 1.38,
"grad_norm": 0.7472418546676636,
"learning_rate": 0.00013094393186657204,
"loss": 0.9416,
"step": 975
},
{
"epoch": 1.38,
"grad_norm": 0.6974193453788757,
"learning_rate": 0.00013087295954577718,
"loss": 0.9788,
"step": 976
},
{
"epoch": 1.38,
"grad_norm": 0.6888452768325806,
"learning_rate": 0.00013080198722498226,
"loss": 0.9453,
"step": 977
},
{
"epoch": 1.39,
"grad_norm": 0.6868741512298584,
"learning_rate": 0.00013073101490418737,
"loss": 0.9096,
"step": 978
},
{
"epoch": 1.39,
"grad_norm": 0.701124370098114,
"learning_rate": 0.00013066004258339249,
"loss": 1.0178,
"step": 979
},
{
"epoch": 1.39,
"grad_norm": 0.7294360399246216,
"learning_rate": 0.0001305890702625976,
"loss": 1.0384,
"step": 980
},
{
"epoch": 1.39,
"grad_norm": 0.719120442867279,
"learning_rate": 0.00013051809794180268,
"loss": 0.9209,
"step": 981
},
{
"epoch": 1.39,
"grad_norm": 0.718369722366333,
"learning_rate": 0.0001304471256210078,
"loss": 0.9457,
"step": 982
},
{
"epoch": 1.39,
"grad_norm": 0.6928309202194214,
"learning_rate": 0.00013037615330021294,
"loss": 0.9093,
"step": 983
},
{
"epoch": 1.39,
"grad_norm": 0.7082747220993042,
"learning_rate": 0.00013030518097941805,
"loss": 0.9639,
"step": 984
},
{
"epoch": 1.4,
"grad_norm": 0.747394323348999,
"learning_rate": 0.00013023420865862314,
"loss": 1.0332,
"step": 985
},
{
"epoch": 1.4,
"grad_norm": 0.6965970993041992,
"learning_rate": 0.00013016323633782825,
"loss": 1.011,
"step": 986
},
{
"epoch": 1.4,
"grad_norm": 0.6957747340202332,
"learning_rate": 0.00013009226401703336,
"loss": 0.9917,
"step": 987
},
{
"epoch": 1.4,
"grad_norm": 0.7042578458786011,
"learning_rate": 0.00013002129169623847,
"loss": 0.9221,
"step": 988
},
{
"epoch": 1.4,
"grad_norm": 0.709457278251648,
"learning_rate": 0.0001299503193754436,
"loss": 0.9621,
"step": 989
},
{
"epoch": 1.4,
"grad_norm": 0.7135986685752869,
"learning_rate": 0.0001298793470546487,
"loss": 0.9894,
"step": 990
},
{
"epoch": 1.4,
"grad_norm": 0.7359340190887451,
"learning_rate": 0.0001298083747338538,
"loss": 0.9508,
"step": 991
},
{
"epoch": 1.41,
"grad_norm": 0.7456433176994324,
"learning_rate": 0.00012973740241305892,
"loss": 1.0153,
"step": 992
},
{
"epoch": 1.41,
"grad_norm": 0.7097339034080505,
"learning_rate": 0.00012966643009226404,
"loss": 0.9799,
"step": 993
},
{
"epoch": 1.41,
"grad_norm": 0.7416273951530457,
"learning_rate": 0.00012959545777146912,
"loss": 1.0445,
"step": 994
},
{
"epoch": 1.41,
"grad_norm": 0.7126497030258179,
"learning_rate": 0.00012952448545067424,
"loss": 0.9907,
"step": 995
},
{
"epoch": 1.41,
"grad_norm": 0.7022689580917358,
"learning_rate": 0.00012945351312987935,
"loss": 0.9576,
"step": 996
},
{
"epoch": 1.41,
"grad_norm": 0.728905439376831,
"learning_rate": 0.00012938254080908446,
"loss": 0.9537,
"step": 997
},
{
"epoch": 1.41,
"grad_norm": 0.7206951975822449,
"learning_rate": 0.00012931156848828957,
"loss": 1.0048,
"step": 998
},
{
"epoch": 1.42,
"grad_norm": 0.6895846724510193,
"learning_rate": 0.0001292405961674947,
"loss": 1.0182,
"step": 999
},
{
"epoch": 1.42,
"grad_norm": 0.7300769090652466,
"learning_rate": 0.0001291696238466998,
"loss": 1.0212,
"step": 1000
},
{
"epoch": 1.42,
"grad_norm": 0.7058583498001099,
"learning_rate": 0.0001290986515259049,
"loss": 0.9982,
"step": 1001
},
{
"epoch": 1.42,
"grad_norm": 0.681090235710144,
"learning_rate": 0.00012902767920511,
"loss": 0.9898,
"step": 1002
},
{
"epoch": 1.42,
"grad_norm": 0.6858307123184204,
"learning_rate": 0.0001289567068843151,
"loss": 0.961,
"step": 1003
},
{
"epoch": 1.42,
"grad_norm": 0.6954583525657654,
"learning_rate": 0.00012888573456352025,
"loss": 1.0346,
"step": 1004
},
{
"epoch": 1.42,
"grad_norm": 0.7096951007843018,
"learning_rate": 0.00012881476224272536,
"loss": 0.9599,
"step": 1005
},
{
"epoch": 1.43,
"grad_norm": 0.7118542194366455,
"learning_rate": 0.00012874378992193045,
"loss": 0.9677,
"step": 1006
},
{
"epoch": 1.43,
"grad_norm": 0.7197057604789734,
"learning_rate": 0.00012867281760113556,
"loss": 0.937,
"step": 1007
},
{
"epoch": 1.43,
"grad_norm": 0.720856785774231,
"learning_rate": 0.00012860184528034067,
"loss": 0.9606,
"step": 1008
},
{
"epoch": 1.43,
"grad_norm": 0.6895302534103394,
"learning_rate": 0.0001285308729595458,
"loss": 0.9052,
"step": 1009
},
{
"epoch": 1.43,
"grad_norm": 0.7161164879798889,
"learning_rate": 0.00012845990063875087,
"loss": 0.9784,
"step": 1010
},
{
"epoch": 1.43,
"grad_norm": 0.7340686321258545,
"learning_rate": 0.000128388928317956,
"loss": 0.9954,
"step": 1011
},
{
"epoch": 1.43,
"grad_norm": 0.7402980923652649,
"learning_rate": 0.00012831795599716112,
"loss": 0.9905,
"step": 1012
},
{
"epoch": 1.44,
"grad_norm": 0.6780891418457031,
"learning_rate": 0.00012824698367636624,
"loss": 0.8967,
"step": 1013
},
{
"epoch": 1.44,
"grad_norm": 0.6960120797157288,
"learning_rate": 0.00012817601135557132,
"loss": 0.8983,
"step": 1014
},
{
"epoch": 1.44,
"grad_norm": 0.7087661623954773,
"learning_rate": 0.00012810503903477644,
"loss": 0.927,
"step": 1015
},
{
"epoch": 1.44,
"grad_norm": 0.747851550579071,
"learning_rate": 0.00012803406671398155,
"loss": 1.0698,
"step": 1016
},
{
"epoch": 1.44,
"grad_norm": 0.7293078899383545,
"learning_rate": 0.00012796309439318666,
"loss": 1.007,
"step": 1017
},
{
"epoch": 1.44,
"grad_norm": 0.7039139270782471,
"learning_rate": 0.00012789212207239177,
"loss": 0.8827,
"step": 1018
},
{
"epoch": 1.44,
"grad_norm": 0.7060878276824951,
"learning_rate": 0.0001278211497515969,
"loss": 0.9078,
"step": 1019
},
{
"epoch": 1.45,
"grad_norm": 0.7231726050376892,
"learning_rate": 0.000127750177430802,
"loss": 0.9605,
"step": 1020
},
{
"epoch": 1.45,
"grad_norm": 0.7151123285293579,
"learning_rate": 0.0001276792051100071,
"loss": 0.9377,
"step": 1021
},
{
"epoch": 1.45,
"grad_norm": 0.7285674810409546,
"learning_rate": 0.00012760823278921223,
"loss": 0.959,
"step": 1022
},
{
"epoch": 1.45,
"grad_norm": 0.7382057309150696,
"learning_rate": 0.0001275372604684173,
"loss": 0.9733,
"step": 1023
},
{
"epoch": 1.45,
"grad_norm": 0.7037867307662964,
"learning_rate": 0.00012746628814762242,
"loss": 0.9261,
"step": 1024
},
{
"epoch": 1.45,
"grad_norm": 0.7247341275215149,
"learning_rate": 0.00012739531582682754,
"loss": 0.9765,
"step": 1025
},
{
"epoch": 1.45,
"grad_norm": 0.73041170835495,
"learning_rate": 0.00012732434350603265,
"loss": 0.956,
"step": 1026
},
{
"epoch": 1.45,
"grad_norm": 0.6923772096633911,
"learning_rate": 0.00012725337118523776,
"loss": 1.0133,
"step": 1027
},
{
"epoch": 1.46,
"grad_norm": 0.6898492574691772,
"learning_rate": 0.00012718239886444287,
"loss": 1.0216,
"step": 1028
},
{
"epoch": 1.46,
"grad_norm": 0.7031357288360596,
"learning_rate": 0.000127111426543648,
"loss": 0.9667,
"step": 1029
},
{
"epoch": 1.46,
"grad_norm": 0.7023702263832092,
"learning_rate": 0.0001270404542228531,
"loss": 0.9699,
"step": 1030
},
{
"epoch": 1.46,
"grad_norm": 0.6999973654747009,
"learning_rate": 0.00012696948190205819,
"loss": 0.9927,
"step": 1031
},
{
"epoch": 1.46,
"grad_norm": 0.7087770104408264,
"learning_rate": 0.0001268985095812633,
"loss": 0.8973,
"step": 1032
},
{
"epoch": 1.46,
"grad_norm": 0.7190529704093933,
"learning_rate": 0.00012682753726046844,
"loss": 0.9662,
"step": 1033
},
{
"epoch": 1.46,
"grad_norm": 0.7416104674339294,
"learning_rate": 0.00012675656493967355,
"loss": 1.0066,
"step": 1034
},
{
"epoch": 1.47,
"grad_norm": 0.7032634615898132,
"learning_rate": 0.00012668559261887864,
"loss": 0.9999,
"step": 1035
},
{
"epoch": 1.47,
"grad_norm": 0.7097875475883484,
"learning_rate": 0.00012661462029808375,
"loss": 0.9751,
"step": 1036
},
{
"epoch": 1.47,
"grad_norm": 0.6958321928977966,
"learning_rate": 0.00012654364797728886,
"loss": 0.9407,
"step": 1037
},
{
"epoch": 1.47,
"grad_norm": 0.7388033270835876,
"learning_rate": 0.00012647267565649397,
"loss": 1.0527,
"step": 1038
},
{
"epoch": 1.47,
"grad_norm": 0.6906083226203918,
"learning_rate": 0.00012640170333569906,
"loss": 0.9368,
"step": 1039
},
{
"epoch": 1.47,
"grad_norm": 0.7116028666496277,
"learning_rate": 0.0001263307310149042,
"loss": 0.9618,
"step": 1040
},
{
"epoch": 1.47,
"grad_norm": 0.7041985392570496,
"learning_rate": 0.0001262597586941093,
"loss": 0.9927,
"step": 1041
},
{
"epoch": 1.48,
"grad_norm": 0.7428408861160278,
"learning_rate": 0.00012618878637331443,
"loss": 1.0282,
"step": 1042
},
{
"epoch": 1.48,
"grad_norm": 0.6752524375915527,
"learning_rate": 0.0001261178140525195,
"loss": 0.927,
"step": 1043
},
{
"epoch": 1.48,
"grad_norm": 0.7071763277053833,
"learning_rate": 0.00012604684173172462,
"loss": 0.9534,
"step": 1044
},
{
"epoch": 1.48,
"grad_norm": 0.710394561290741,
"learning_rate": 0.00012597586941092974,
"loss": 0.9324,
"step": 1045
},
{
"epoch": 1.48,
"grad_norm": 0.7077054381370544,
"learning_rate": 0.00012590489709013485,
"loss": 0.9826,
"step": 1046
},
{
"epoch": 1.48,
"grad_norm": 0.7074711918830872,
"learning_rate": 0.00012583392476933996,
"loss": 0.9878,
"step": 1047
},
{
"epoch": 1.48,
"grad_norm": 0.6914960145950317,
"learning_rate": 0.00012576295244854507,
"loss": 1.0357,
"step": 1048
},
{
"epoch": 1.49,
"grad_norm": 0.6889827251434326,
"learning_rate": 0.0001256919801277502,
"loss": 0.982,
"step": 1049
},
{
"epoch": 1.49,
"grad_norm": 0.6742181777954102,
"learning_rate": 0.0001256210078069553,
"loss": 0.9513,
"step": 1050
},
{
"epoch": 1.49,
"grad_norm": 0.6830121874809265,
"learning_rate": 0.0001255500354861604,
"loss": 0.9961,
"step": 1051
},
{
"epoch": 1.49,
"grad_norm": 0.7067031264305115,
"learning_rate": 0.0001254790631653655,
"loss": 0.9623,
"step": 1052
},
{
"epoch": 1.49,
"grad_norm": 0.6789557933807373,
"learning_rate": 0.0001254080908445706,
"loss": 0.9556,
"step": 1053
},
{
"epoch": 1.49,
"grad_norm": 0.7340845465660095,
"learning_rate": 0.00012533711852377575,
"loss": 1.0122,
"step": 1054
},
{
"epoch": 1.49,
"grad_norm": 0.7131407260894775,
"learning_rate": 0.00012526614620298084,
"loss": 0.9998,
"step": 1055
},
{
"epoch": 1.5,
"grad_norm": 0.7129774689674377,
"learning_rate": 0.00012519517388218595,
"loss": 0.9348,
"step": 1056
},
{
"epoch": 1.5,
"grad_norm": 0.7226494550704956,
"learning_rate": 0.00012512420156139106,
"loss": 0.9792,
"step": 1057
},
{
"epoch": 1.5,
"grad_norm": 0.7202288508415222,
"learning_rate": 0.00012505322924059617,
"loss": 0.9546,
"step": 1058
},
{
"epoch": 1.5,
"grad_norm": 0.7222435474395752,
"learning_rate": 0.0001249822569198013,
"loss": 0.9995,
"step": 1059
},
{
"epoch": 1.5,
"grad_norm": 0.7435225248336792,
"learning_rate": 0.00012491128459900637,
"loss": 1.033,
"step": 1060
},
{
"epoch": 1.5,
"grad_norm": 0.699711799621582,
"learning_rate": 0.0001248403122782115,
"loss": 0.9639,
"step": 1061
},
{
"epoch": 1.5,
"grad_norm": 0.7038230895996094,
"learning_rate": 0.00012476933995741663,
"loss": 0.9198,
"step": 1062
},
{
"epoch": 1.51,
"grad_norm": 0.7003630995750427,
"learning_rate": 0.00012469836763662174,
"loss": 1.0208,
"step": 1063
},
{
"epoch": 1.51,
"grad_norm": 0.7104167938232422,
"learning_rate": 0.00012462739531582682,
"loss": 0.986,
"step": 1064
},
{
"epoch": 1.51,
"grad_norm": 0.7140862941741943,
"learning_rate": 0.00012455642299503194,
"loss": 0.9822,
"step": 1065
},
{
"epoch": 1.51,
"grad_norm": 0.7276419401168823,
"learning_rate": 0.00012448545067423705,
"loss": 1.0231,
"step": 1066
},
{
"epoch": 1.51,
"grad_norm": 0.7366431355476379,
"learning_rate": 0.00012441447835344216,
"loss": 1.0071,
"step": 1067
},
{
"epoch": 1.51,
"grad_norm": 0.7350658178329468,
"learning_rate": 0.00012434350603264727,
"loss": 0.9918,
"step": 1068
},
{
"epoch": 1.51,
"grad_norm": 0.6769629716873169,
"learning_rate": 0.0001242725337118524,
"loss": 0.9973,
"step": 1069
},
{
"epoch": 1.52,
"grad_norm": 0.7047297358512878,
"learning_rate": 0.0001242015613910575,
"loss": 1.0167,
"step": 1070
},
{
"epoch": 1.52,
"grad_norm": 0.6941127181053162,
"learning_rate": 0.0001241305890702626,
"loss": 0.9861,
"step": 1071
},
{
"epoch": 1.52,
"grad_norm": 0.727709949016571,
"learning_rate": 0.0001240596167494677,
"loss": 1.0325,
"step": 1072
},
{
"epoch": 1.52,
"grad_norm": 0.7129109501838684,
"learning_rate": 0.0001239886444286728,
"loss": 0.9249,
"step": 1073
},
{
"epoch": 1.52,
"grad_norm": 0.6976962089538574,
"learning_rate": 0.00012391767210787792,
"loss": 0.9472,
"step": 1074
},
{
"epoch": 1.52,
"grad_norm": 0.7473706603050232,
"learning_rate": 0.00012384669978708306,
"loss": 0.9843,
"step": 1075
},
{
"epoch": 1.52,
"grad_norm": 0.7451398968696594,
"learning_rate": 0.00012377572746628815,
"loss": 0.9926,
"step": 1076
},
{
"epoch": 1.53,
"grad_norm": 0.7221840023994446,
"learning_rate": 0.00012370475514549326,
"loss": 0.9513,
"step": 1077
},
{
"epoch": 1.53,
"grad_norm": 0.7023710012435913,
"learning_rate": 0.00012363378282469838,
"loss": 0.8927,
"step": 1078
},
{
"epoch": 1.53,
"grad_norm": 0.6784040331840515,
"learning_rate": 0.0001235628105039035,
"loss": 0.9059,
"step": 1079
},
{
"epoch": 1.53,
"grad_norm": 0.7393279671669006,
"learning_rate": 0.0001234918381831086,
"loss": 1.0369,
"step": 1080
},
{
"epoch": 1.53,
"grad_norm": 0.6987690329551697,
"learning_rate": 0.00012342086586231369,
"loss": 0.961,
"step": 1081
},
{
"epoch": 1.53,
"grad_norm": 0.7038789987564087,
"learning_rate": 0.00012334989354151883,
"loss": 1.0484,
"step": 1082
},
{
"epoch": 1.53,
"grad_norm": 0.684845507144928,
"learning_rate": 0.00012327892122072394,
"loss": 0.9604,
"step": 1083
},
{
"epoch": 1.54,
"grad_norm": 0.7157427072525024,
"learning_rate": 0.00012320794889992902,
"loss": 0.9702,
"step": 1084
},
{
"epoch": 1.54,
"grad_norm": 0.7225085496902466,
"learning_rate": 0.00012313697657913414,
"loss": 1.0421,
"step": 1085
},
{
"epoch": 1.54,
"grad_norm": 0.7261961698532104,
"learning_rate": 0.00012306600425833925,
"loss": 1.0224,
"step": 1086
},
{
"epoch": 1.54,
"grad_norm": 0.6956989765167236,
"learning_rate": 0.00012299503193754436,
"loss": 0.9708,
"step": 1087
},
{
"epoch": 1.54,
"grad_norm": 0.7316460609436035,
"learning_rate": 0.00012292405961674948,
"loss": 1.0543,
"step": 1088
},
{
"epoch": 1.54,
"grad_norm": 0.7196297645568848,
"learning_rate": 0.0001228530872959546,
"loss": 1.0021,
"step": 1089
},
{
"epoch": 1.54,
"grad_norm": 0.7039411664009094,
"learning_rate": 0.0001227821149751597,
"loss": 0.9563,
"step": 1090
},
{
"epoch": 1.55,
"grad_norm": 0.719147801399231,
"learning_rate": 0.0001227111426543648,
"loss": 1.0067,
"step": 1091
},
{
"epoch": 1.55,
"grad_norm": 0.6809403300285339,
"learning_rate": 0.00012264017033356993,
"loss": 0.995,
"step": 1092
},
{
"epoch": 1.55,
"grad_norm": 0.7004520297050476,
"learning_rate": 0.000122569198012775,
"loss": 0.9479,
"step": 1093
},
{
"epoch": 1.55,
"grad_norm": 0.7003827691078186,
"learning_rate": 0.00012249822569198012,
"loss": 0.9724,
"step": 1094
},
{
"epoch": 1.55,
"grad_norm": 0.7075307965278625,
"learning_rate": 0.00012242725337118524,
"loss": 0.9856,
"step": 1095
},
{
"epoch": 1.55,
"grad_norm": 0.723334789276123,
"learning_rate": 0.00012235628105039038,
"loss": 0.9768,
"step": 1096
},
{
"epoch": 1.55,
"grad_norm": 0.7285984754562378,
"learning_rate": 0.00012228530872959546,
"loss": 1.005,
"step": 1097
},
{
"epoch": 1.56,
"grad_norm": 0.7101818919181824,
"learning_rate": 0.00012221433640880058,
"loss": 0.9543,
"step": 1098
},
{
"epoch": 1.56,
"grad_norm": 0.749718427658081,
"learning_rate": 0.0001221433640880057,
"loss": 1.0076,
"step": 1099
},
{
"epoch": 1.56,
"grad_norm": 0.7114327549934387,
"learning_rate": 0.0001220723917672108,
"loss": 0.9289,
"step": 1100
},
{
"epoch": 1.56,
"grad_norm": 0.7335402965545654,
"learning_rate": 0.0001220014194464159,
"loss": 0.9761,
"step": 1101
},
{
"epoch": 1.56,
"grad_norm": 0.7315895557403564,
"learning_rate": 0.000121930447125621,
"loss": 0.9498,
"step": 1102
},
{
"epoch": 1.56,
"grad_norm": 0.7099215984344482,
"learning_rate": 0.00012185947480482611,
"loss": 0.9575,
"step": 1103
},
{
"epoch": 1.56,
"grad_norm": 0.7005743384361267,
"learning_rate": 0.00012178850248403124,
"loss": 0.9605,
"step": 1104
},
{
"epoch": 1.57,
"grad_norm": 0.7421797513961792,
"learning_rate": 0.00012171753016323635,
"loss": 1.067,
"step": 1105
},
{
"epoch": 1.57,
"grad_norm": 0.7173910737037659,
"learning_rate": 0.00012164655784244145,
"loss": 0.9843,
"step": 1106
},
{
"epoch": 1.57,
"grad_norm": 0.6898432374000549,
"learning_rate": 0.00012157558552164656,
"loss": 0.9735,
"step": 1107
},
{
"epoch": 1.57,
"grad_norm": 0.708389937877655,
"learning_rate": 0.00012150461320085168,
"loss": 0.9497,
"step": 1108
},
{
"epoch": 1.57,
"grad_norm": 0.7111474275588989,
"learning_rate": 0.00012143364088005677,
"loss": 0.8981,
"step": 1109
},
{
"epoch": 1.57,
"grad_norm": 0.6907044053077698,
"learning_rate": 0.00012136266855926189,
"loss": 0.9711,
"step": 1110
},
{
"epoch": 1.57,
"grad_norm": 0.744263231754303,
"learning_rate": 0.00012129169623846701,
"loss": 0.9874,
"step": 1111
},
{
"epoch": 1.58,
"grad_norm": 0.7199329733848572,
"learning_rate": 0.00012122072391767211,
"loss": 0.9704,
"step": 1112
},
{
"epoch": 1.58,
"grad_norm": 0.7198705077171326,
"learning_rate": 0.00012114975159687723,
"loss": 0.9508,
"step": 1113
},
{
"epoch": 1.58,
"grad_norm": 0.7411786913871765,
"learning_rate": 0.00012107877927608234,
"loss": 0.9998,
"step": 1114
},
{
"epoch": 1.58,
"grad_norm": 0.675391435623169,
"learning_rate": 0.00012100780695528744,
"loss": 0.9317,
"step": 1115
},
{
"epoch": 1.58,
"grad_norm": 0.6812769174575806,
"learning_rate": 0.00012093683463449255,
"loss": 0.889,
"step": 1116
},
{
"epoch": 1.58,
"grad_norm": 0.7100497484207153,
"learning_rate": 0.00012086586231369765,
"loss": 1.0161,
"step": 1117
},
{
"epoch": 1.58,
"grad_norm": 0.7073559761047363,
"learning_rate": 0.00012079488999290278,
"loss": 0.9373,
"step": 1118
},
{
"epoch": 1.59,
"grad_norm": 0.7181984186172485,
"learning_rate": 0.00012072391767210789,
"loss": 1.0138,
"step": 1119
},
{
"epoch": 1.59,
"grad_norm": 0.7441282272338867,
"learning_rate": 0.000120652945351313,
"loss": 1.0439,
"step": 1120
},
{
"epoch": 1.59,
"grad_norm": 0.7104730606079102,
"learning_rate": 0.0001205819730305181,
"loss": 1.0055,
"step": 1121
},
{
"epoch": 1.59,
"grad_norm": 0.7464049458503723,
"learning_rate": 0.00012051100070972321,
"loss": 1.011,
"step": 1122
},
{
"epoch": 1.59,
"grad_norm": 0.7502149939537048,
"learning_rate": 0.00012044002838892831,
"loss": 0.9707,
"step": 1123
},
{
"epoch": 1.59,
"grad_norm": 0.7351347804069519,
"learning_rate": 0.00012036905606813342,
"loss": 0.9797,
"step": 1124
},
{
"epoch": 1.59,
"grad_norm": 0.7235199809074402,
"learning_rate": 0.00012029808374733855,
"loss": 1.0525,
"step": 1125
},
{
"epoch": 1.6,
"grad_norm": 0.6959457993507385,
"learning_rate": 0.00012022711142654366,
"loss": 0.9818,
"step": 1126
},
{
"epoch": 1.6,
"grad_norm": 0.7070314288139343,
"learning_rate": 0.00012015613910574876,
"loss": 1.0983,
"step": 1127
},
{
"epoch": 1.6,
"grad_norm": 0.7202725410461426,
"learning_rate": 0.00012008516678495388,
"loss": 1.046,
"step": 1128
},
{
"epoch": 1.6,
"grad_norm": 0.6858893632888794,
"learning_rate": 0.00012001419446415898,
"loss": 0.8999,
"step": 1129
},
{
"epoch": 1.6,
"grad_norm": 0.7225475907325745,
"learning_rate": 0.00011994322214336409,
"loss": 1.0171,
"step": 1130
},
{
"epoch": 1.6,
"grad_norm": 0.7360539436340332,
"learning_rate": 0.00011987224982256919,
"loss": 1.047,
"step": 1131
},
{
"epoch": 1.6,
"grad_norm": 0.7139810919761658,
"learning_rate": 0.00011980127750177433,
"loss": 1.008,
"step": 1132
},
{
"epoch": 1.61,
"grad_norm": 0.7204037308692932,
"learning_rate": 0.00011973030518097943,
"loss": 0.9262,
"step": 1133
},
{
"epoch": 1.61,
"grad_norm": 0.7312289476394653,
"learning_rate": 0.00011965933286018454,
"loss": 1.0079,
"step": 1134
},
{
"epoch": 1.61,
"grad_norm": 0.7065761685371399,
"learning_rate": 0.00011958836053938964,
"loss": 0.9956,
"step": 1135
},
{
"epoch": 1.61,
"grad_norm": 0.743116557598114,
"learning_rate": 0.00011951738821859475,
"loss": 0.9679,
"step": 1136
},
{
"epoch": 1.61,
"grad_norm": 0.7406318783760071,
"learning_rate": 0.00011944641589779986,
"loss": 1.0024,
"step": 1137
},
{
"epoch": 1.61,
"grad_norm": 0.7344107031822205,
"learning_rate": 0.00011937544357700496,
"loss": 0.9899,
"step": 1138
},
{
"epoch": 1.61,
"grad_norm": 0.7228212356567383,
"learning_rate": 0.00011930447125621009,
"loss": 0.9902,
"step": 1139
},
{
"epoch": 1.62,
"grad_norm": 0.7286756634712219,
"learning_rate": 0.0001192334989354152,
"loss": 1.0239,
"step": 1140
},
{
"epoch": 1.62,
"grad_norm": 0.7067569494247437,
"learning_rate": 0.0001191625266146203,
"loss": 0.95,
"step": 1141
},
{
"epoch": 1.62,
"grad_norm": 0.7505934238433838,
"learning_rate": 0.00011909155429382541,
"loss": 1.0528,
"step": 1142
},
{
"epoch": 1.62,
"grad_norm": 0.6888457536697388,
"learning_rate": 0.00011902058197303053,
"loss": 0.9544,
"step": 1143
},
{
"epoch": 1.62,
"grad_norm": 0.6858482956886292,
"learning_rate": 0.00011894960965223563,
"loss": 0.9525,
"step": 1144
},
{
"epoch": 1.62,
"grad_norm": 0.722373902797699,
"learning_rate": 0.00011887863733144074,
"loss": 1.047,
"step": 1145
},
{
"epoch": 1.62,
"grad_norm": 0.7318529486656189,
"learning_rate": 0.00011880766501064586,
"loss": 0.9955,
"step": 1146
},
{
"epoch": 1.62,
"grad_norm": 0.7528226375579834,
"learning_rate": 0.00011873669268985096,
"loss": 0.926,
"step": 1147
},
{
"epoch": 1.63,
"grad_norm": 0.7202761173248291,
"learning_rate": 0.00011866572036905608,
"loss": 0.9099,
"step": 1148
},
{
"epoch": 1.63,
"grad_norm": 0.7594791054725647,
"learning_rate": 0.00011859474804826119,
"loss": 1.0122,
"step": 1149
},
{
"epoch": 1.63,
"grad_norm": 0.7222248315811157,
"learning_rate": 0.00011852377572746629,
"loss": 0.9161,
"step": 1150
},
{
"epoch": 1.63,
"grad_norm": 0.7424381971359253,
"learning_rate": 0.0001184528034066714,
"loss": 0.9357,
"step": 1151
},
{
"epoch": 1.63,
"grad_norm": 0.7474575638771057,
"learning_rate": 0.0001183818310858765,
"loss": 0.9426,
"step": 1152
},
{
"epoch": 1.63,
"grad_norm": 0.7351663112640381,
"learning_rate": 0.00011831085876508164,
"loss": 0.954,
"step": 1153
},
{
"epoch": 1.63,
"grad_norm": 0.6973652839660645,
"learning_rate": 0.00011823988644428674,
"loss": 0.9697,
"step": 1154
},
{
"epoch": 1.64,
"grad_norm": 0.7030205726623535,
"learning_rate": 0.00011816891412349185,
"loss": 0.9262,
"step": 1155
},
{
"epoch": 1.64,
"grad_norm": 0.6983229517936707,
"learning_rate": 0.00011809794180269695,
"loss": 0.9387,
"step": 1156
},
{
"epoch": 1.64,
"grad_norm": 0.7123252153396606,
"learning_rate": 0.00011802696948190206,
"loss": 1.0255,
"step": 1157
},
{
"epoch": 1.64,
"grad_norm": 0.7124939560890198,
"learning_rate": 0.00011795599716110716,
"loss": 0.953,
"step": 1158
},
{
"epoch": 1.64,
"grad_norm": 0.6996143460273743,
"learning_rate": 0.00011788502484031228,
"loss": 0.9785,
"step": 1159
},
{
"epoch": 1.64,
"grad_norm": 0.7493870854377747,
"learning_rate": 0.0001178140525195174,
"loss": 0.9201,
"step": 1160
},
{
"epoch": 1.64,
"grad_norm": 0.7479891777038574,
"learning_rate": 0.00011774308019872251,
"loss": 0.9566,
"step": 1161
},
{
"epoch": 1.65,
"grad_norm": 0.7288089394569397,
"learning_rate": 0.00011767210787792761,
"loss": 1.004,
"step": 1162
},
{
"epoch": 1.65,
"grad_norm": 0.7156316041946411,
"learning_rate": 0.00011760113555713273,
"loss": 0.9416,
"step": 1163
},
{
"epoch": 1.65,
"grad_norm": 0.7374414801597595,
"learning_rate": 0.00011753016323633783,
"loss": 1.0313,
"step": 1164
},
{
"epoch": 1.65,
"grad_norm": 0.6988320350646973,
"learning_rate": 0.00011745919091554294,
"loss": 0.9442,
"step": 1165
},
{
"epoch": 1.65,
"grad_norm": 0.7388788461685181,
"learning_rate": 0.00011738821859474805,
"loss": 1.0254,
"step": 1166
},
{
"epoch": 1.65,
"grad_norm": 0.6990705132484436,
"learning_rate": 0.00011731724627395318,
"loss": 0.9383,
"step": 1167
},
{
"epoch": 1.65,
"grad_norm": 0.698650598526001,
"learning_rate": 0.00011724627395315828,
"loss": 0.9462,
"step": 1168
},
{
"epoch": 1.66,
"grad_norm": 0.7219197154045105,
"learning_rate": 0.00011717530163236339,
"loss": 0.9926,
"step": 1169
},
{
"epoch": 1.66,
"grad_norm": 0.7224728465080261,
"learning_rate": 0.00011710432931156849,
"loss": 0.9425,
"step": 1170
},
{
"epoch": 1.66,
"grad_norm": 0.7238569259643555,
"learning_rate": 0.0001170333569907736,
"loss": 0.9901,
"step": 1171
},
{
"epoch": 1.66,
"grad_norm": 0.7193832993507385,
"learning_rate": 0.00011696238466997871,
"loss": 0.9147,
"step": 1172
},
{
"epoch": 1.66,
"grad_norm": 0.7369004487991333,
"learning_rate": 0.00011689141234918381,
"loss": 0.9768,
"step": 1173
},
{
"epoch": 1.66,
"grad_norm": 0.7380697131156921,
"learning_rate": 0.00011682044002838894,
"loss": 0.9593,
"step": 1174
},
{
"epoch": 1.66,
"grad_norm": 0.737091064453125,
"learning_rate": 0.00011674946770759405,
"loss": 1.022,
"step": 1175
},
{
"epoch": 1.67,
"grad_norm": 0.7183380722999573,
"learning_rate": 0.00011667849538679916,
"loss": 0.9548,
"step": 1176
},
{
"epoch": 1.67,
"grad_norm": 0.7147498726844788,
"learning_rate": 0.00011660752306600426,
"loss": 0.9307,
"step": 1177
},
{
"epoch": 1.67,
"grad_norm": 0.7256461381912231,
"learning_rate": 0.00011653655074520938,
"loss": 0.9884,
"step": 1178
},
{
"epoch": 1.67,
"grad_norm": 0.701350212097168,
"learning_rate": 0.00011646557842441448,
"loss": 0.9209,
"step": 1179
},
{
"epoch": 1.67,
"grad_norm": 0.7127419710159302,
"learning_rate": 0.00011639460610361959,
"loss": 0.9475,
"step": 1180
},
{
"epoch": 1.67,
"grad_norm": 0.7424458265304565,
"learning_rate": 0.00011632363378282471,
"loss": 1.0199,
"step": 1181
},
{
"epoch": 1.67,
"grad_norm": 0.7015305757522583,
"learning_rate": 0.00011625266146202983,
"loss": 0.9896,
"step": 1182
},
{
"epoch": 1.68,
"grad_norm": 0.7185436487197876,
"learning_rate": 0.00011618168914123493,
"loss": 0.9676,
"step": 1183
},
{
"epoch": 1.68,
"grad_norm": 0.7177726030349731,
"learning_rate": 0.00011611071682044004,
"loss": 0.973,
"step": 1184
},
{
"epoch": 1.68,
"grad_norm": 0.7331061959266663,
"learning_rate": 0.00011603974449964514,
"loss": 0.9516,
"step": 1185
},
{
"epoch": 1.68,
"grad_norm": 0.7594481706619263,
"learning_rate": 0.00011596877217885025,
"loss": 1.011,
"step": 1186
},
{
"epoch": 1.68,
"grad_norm": 0.723116397857666,
"learning_rate": 0.00011589779985805535,
"loss": 0.9492,
"step": 1187
},
{
"epoch": 1.68,
"grad_norm": 0.773428738117218,
"learning_rate": 0.00011582682753726049,
"loss": 0.9604,
"step": 1188
},
{
"epoch": 1.68,
"grad_norm": 0.7496587634086609,
"learning_rate": 0.00011575585521646559,
"loss": 1.0742,
"step": 1189
},
{
"epoch": 1.69,
"grad_norm": 0.7052460312843323,
"learning_rate": 0.0001156848828956707,
"loss": 0.9653,
"step": 1190
},
{
"epoch": 1.69,
"grad_norm": 0.716080904006958,
"learning_rate": 0.0001156139105748758,
"loss": 1.0207,
"step": 1191
},
{
"epoch": 1.69,
"grad_norm": 0.7135874032974243,
"learning_rate": 0.00011554293825408091,
"loss": 0.9421,
"step": 1192
},
{
"epoch": 1.69,
"grad_norm": 0.7133678197860718,
"learning_rate": 0.00011547196593328601,
"loss": 0.9889,
"step": 1193
},
{
"epoch": 1.69,
"grad_norm": 0.711086630821228,
"learning_rate": 0.00011540099361249113,
"loss": 0.9795,
"step": 1194
},
{
"epoch": 1.69,
"grad_norm": 0.716465413570404,
"learning_rate": 0.00011533002129169624,
"loss": 1.0216,
"step": 1195
},
{
"epoch": 1.69,
"grad_norm": 0.7101050615310669,
"learning_rate": 0.00011525904897090137,
"loss": 0.965,
"step": 1196
},
{
"epoch": 1.7,
"grad_norm": 0.7278016209602356,
"learning_rate": 0.00011518807665010646,
"loss": 0.9736,
"step": 1197
},
{
"epoch": 1.7,
"grad_norm": 0.6872366666793823,
"learning_rate": 0.00011511710432931158,
"loss": 0.9184,
"step": 1198
},
{
"epoch": 1.7,
"grad_norm": 0.6575968861579895,
"learning_rate": 0.00011504613200851668,
"loss": 0.8861,
"step": 1199
},
{
"epoch": 1.7,
"grad_norm": 0.7604357004165649,
"learning_rate": 0.00011497515968772179,
"loss": 0.9599,
"step": 1200
},
{
"epoch": 1.7,
"grad_norm": 0.7085766792297363,
"learning_rate": 0.0001149041873669269,
"loss": 0.9557,
"step": 1201
},
{
"epoch": 1.7,
"grad_norm": 0.7655233144760132,
"learning_rate": 0.000114833215046132,
"loss": 0.969,
"step": 1202
},
{
"epoch": 1.7,
"grad_norm": 0.7449689507484436,
"learning_rate": 0.00011476224272533713,
"loss": 1.0241,
"step": 1203
},
{
"epoch": 1.71,
"grad_norm": 0.7180941700935364,
"learning_rate": 0.00011469127040454224,
"loss": 1.0062,
"step": 1204
},
{
"epoch": 1.71,
"grad_norm": 0.7065799236297607,
"learning_rate": 0.00011462029808374735,
"loss": 0.8982,
"step": 1205
},
{
"epoch": 1.71,
"grad_norm": 0.7072206735610962,
"learning_rate": 0.00011454932576295245,
"loss": 0.9872,
"step": 1206
},
{
"epoch": 1.71,
"grad_norm": 0.7332132458686829,
"learning_rate": 0.00011447835344215756,
"loss": 1.0426,
"step": 1207
},
{
"epoch": 1.71,
"grad_norm": 0.6793428063392639,
"learning_rate": 0.00011440738112136266,
"loss": 1.003,
"step": 1208
},
{
"epoch": 1.71,
"grad_norm": 0.6779395341873169,
"learning_rate": 0.00011433640880056778,
"loss": 0.8887,
"step": 1209
},
{
"epoch": 1.71,
"grad_norm": 0.7192417979240417,
"learning_rate": 0.0001142654364797729,
"loss": 1.0065,
"step": 1210
},
{
"epoch": 1.72,
"grad_norm": 0.7286444902420044,
"learning_rate": 0.00011419446415897802,
"loss": 0.9387,
"step": 1211
},
{
"epoch": 1.72,
"grad_norm": 0.7276972532272339,
"learning_rate": 0.00011412349183818311,
"loss": 0.9405,
"step": 1212
},
{
"epoch": 1.72,
"grad_norm": 0.7740305066108704,
"learning_rate": 0.00011405251951738823,
"loss": 1.0154,
"step": 1213
},
{
"epoch": 1.72,
"grad_norm": 0.7302063703536987,
"learning_rate": 0.00011398154719659333,
"loss": 0.9258,
"step": 1214
},
{
"epoch": 1.72,
"grad_norm": 0.7404993772506714,
"learning_rate": 0.00011391057487579844,
"loss": 0.9767,
"step": 1215
},
{
"epoch": 1.72,
"grad_norm": 0.7460538744926453,
"learning_rate": 0.00011383960255500354,
"loss": 0.9811,
"step": 1216
},
{
"epoch": 1.72,
"grad_norm": 0.7067095041275024,
"learning_rate": 0.00011376863023420868,
"loss": 0.923,
"step": 1217
},
{
"epoch": 1.73,
"grad_norm": 0.7123129963874817,
"learning_rate": 0.00011369765791341378,
"loss": 1.0037,
"step": 1218
},
{
"epoch": 1.73,
"grad_norm": 0.6522353291511536,
"learning_rate": 0.00011362668559261889,
"loss": 0.9337,
"step": 1219
},
{
"epoch": 1.73,
"grad_norm": 0.7282575368881226,
"learning_rate": 0.00011355571327182399,
"loss": 1.0617,
"step": 1220
},
{
"epoch": 1.73,
"grad_norm": 0.6829158067703247,
"learning_rate": 0.0001134847409510291,
"loss": 0.8669,
"step": 1221
},
{
"epoch": 1.73,
"grad_norm": 0.7063448429107666,
"learning_rate": 0.0001134137686302342,
"loss": 0.931,
"step": 1222
},
{
"epoch": 1.73,
"grad_norm": 0.7179009318351746,
"learning_rate": 0.00011334279630943931,
"loss": 1.0027,
"step": 1223
},
{
"epoch": 1.73,
"grad_norm": 0.7192047834396362,
"learning_rate": 0.00011327182398864444,
"loss": 0.9713,
"step": 1224
},
{
"epoch": 1.74,
"grad_norm": 0.7517329454421997,
"learning_rate": 0.00011320085166784955,
"loss": 1.0261,
"step": 1225
},
{
"epoch": 1.74,
"grad_norm": 0.7560898661613464,
"learning_rate": 0.00011312987934705465,
"loss": 0.9925,
"step": 1226
},
{
"epoch": 1.74,
"grad_norm": 0.7354062795639038,
"learning_rate": 0.00011305890702625976,
"loss": 0.9544,
"step": 1227
},
{
"epoch": 1.74,
"grad_norm": 0.7111262679100037,
"learning_rate": 0.00011298793470546488,
"loss": 1.0094,
"step": 1228
},
{
"epoch": 1.74,
"grad_norm": 0.7077128291130066,
"learning_rate": 0.00011291696238466998,
"loss": 0.9743,
"step": 1229
},
{
"epoch": 1.74,
"grad_norm": 0.7007378339767456,
"learning_rate": 0.00011284599006387509,
"loss": 0.9428,
"step": 1230
},
{
"epoch": 1.74,
"grad_norm": 0.7295317053794861,
"learning_rate": 0.00011277501774308022,
"loss": 0.96,
"step": 1231
},
{
"epoch": 1.75,
"grad_norm": 0.7037018537521362,
"learning_rate": 0.00011270404542228531,
"loss": 0.939,
"step": 1232
},
{
"epoch": 1.75,
"grad_norm": 0.7029048204421997,
"learning_rate": 0.00011263307310149043,
"loss": 0.932,
"step": 1233
},
{
"epoch": 1.75,
"grad_norm": 0.711932361125946,
"learning_rate": 0.00011256210078069554,
"loss": 0.9009,
"step": 1234
},
{
"epoch": 1.75,
"grad_norm": 0.7566869854927063,
"learning_rate": 0.00011249112845990064,
"loss": 0.9681,
"step": 1235
},
{
"epoch": 1.75,
"grad_norm": 0.7486440539360046,
"learning_rate": 0.00011242015613910575,
"loss": 1.0618,
"step": 1236
},
{
"epoch": 1.75,
"grad_norm": 0.7305347323417664,
"learning_rate": 0.00011234918381831085,
"loss": 0.9537,
"step": 1237
},
{
"epoch": 1.75,
"grad_norm": 0.7390864491462708,
"learning_rate": 0.00011227821149751598,
"loss": 1.0045,
"step": 1238
},
{
"epoch": 1.76,
"grad_norm": 0.7143562436103821,
"learning_rate": 0.00011220723917672109,
"loss": 0.9666,
"step": 1239
},
{
"epoch": 1.76,
"grad_norm": 0.7177388668060303,
"learning_rate": 0.0001121362668559262,
"loss": 1.0068,
"step": 1240
},
{
"epoch": 1.76,
"grad_norm": 0.6928455233573914,
"learning_rate": 0.0001120652945351313,
"loss": 0.9245,
"step": 1241
},
{
"epoch": 1.76,
"grad_norm": 0.6976019144058228,
"learning_rate": 0.00011199432221433641,
"loss": 0.9042,
"step": 1242
},
{
"epoch": 1.76,
"grad_norm": 0.7117376327514648,
"learning_rate": 0.00011192334989354151,
"loss": 0.9605,
"step": 1243
},
{
"epoch": 1.76,
"grad_norm": 0.7036189436912537,
"learning_rate": 0.00011185237757274663,
"loss": 0.8431,
"step": 1244
},
{
"epoch": 1.76,
"grad_norm": 0.7574560046195984,
"learning_rate": 0.00011178140525195175,
"loss": 1.0275,
"step": 1245
},
{
"epoch": 1.77,
"grad_norm": 0.7326035499572754,
"learning_rate": 0.00011171043293115687,
"loss": 0.9939,
"step": 1246
},
{
"epoch": 1.77,
"grad_norm": 0.7244006395339966,
"learning_rate": 0.00011163946061036197,
"loss": 0.9466,
"step": 1247
},
{
"epoch": 1.77,
"grad_norm": 0.7348788380622864,
"learning_rate": 0.00011156848828956708,
"loss": 0.942,
"step": 1248
},
{
"epoch": 1.77,
"grad_norm": 0.7395305037498474,
"learning_rate": 0.00011149751596877218,
"loss": 1.0074,
"step": 1249
},
{
"epoch": 1.77,
"grad_norm": 0.7284185886383057,
"learning_rate": 0.00011142654364797729,
"loss": 0.9612,
"step": 1250
},
{
"epoch": 1.77,
"grad_norm": 0.6985504031181335,
"learning_rate": 0.00011135557132718239,
"loss": 0.9172,
"step": 1251
},
{
"epoch": 1.77,
"grad_norm": 0.7086307406425476,
"learning_rate": 0.00011128459900638753,
"loss": 1.0036,
"step": 1252
},
{
"epoch": 1.78,
"grad_norm": 0.7150089144706726,
"learning_rate": 0.00011121362668559263,
"loss": 0.9534,
"step": 1253
},
{
"epoch": 1.78,
"grad_norm": 0.7261559963226318,
"learning_rate": 0.00011114265436479774,
"loss": 0.9956,
"step": 1254
},
{
"epoch": 1.78,
"grad_norm": 0.7401515245437622,
"learning_rate": 0.00011107168204400284,
"loss": 0.959,
"step": 1255
},
{
"epoch": 1.78,
"grad_norm": 0.732208788394928,
"learning_rate": 0.00011100070972320795,
"loss": 1.0415,
"step": 1256
},
{
"epoch": 1.78,
"grad_norm": 0.7246513366699219,
"learning_rate": 0.00011092973740241307,
"loss": 0.9867,
"step": 1257
},
{
"epoch": 1.78,
"grad_norm": 0.7390275001525879,
"learning_rate": 0.00011085876508161816,
"loss": 0.9737,
"step": 1258
},
{
"epoch": 1.78,
"grad_norm": 0.7073413133621216,
"learning_rate": 0.00011078779276082329,
"loss": 1.011,
"step": 1259
},
{
"epoch": 1.79,
"grad_norm": 0.7103257775306702,
"learning_rate": 0.0001107168204400284,
"loss": 0.9457,
"step": 1260
},
{
"epoch": 1.79,
"grad_norm": 0.7341899871826172,
"learning_rate": 0.0001106458481192335,
"loss": 0.9708,
"step": 1261
},
{
"epoch": 1.79,
"grad_norm": 0.743951141834259,
"learning_rate": 0.00011057487579843862,
"loss": 0.9338,
"step": 1262
},
{
"epoch": 1.79,
"grad_norm": 0.7139862775802612,
"learning_rate": 0.00011050390347764373,
"loss": 0.9012,
"step": 1263
},
{
"epoch": 1.79,
"grad_norm": 0.802229642868042,
"learning_rate": 0.00011043293115684883,
"loss": 0.9703,
"step": 1264
},
{
"epoch": 1.79,
"grad_norm": 0.7592867612838745,
"learning_rate": 0.00011036195883605394,
"loss": 1.0162,
"step": 1265
},
{
"epoch": 1.79,
"grad_norm": 0.7308145761489868,
"learning_rate": 0.00011029098651525907,
"loss": 0.9908,
"step": 1266
},
{
"epoch": 1.79,
"grad_norm": 0.764416515827179,
"learning_rate": 0.00011022001419446417,
"loss": 1.0165,
"step": 1267
},
{
"epoch": 1.8,
"grad_norm": 0.7167296409606934,
"learning_rate": 0.00011014904187366928,
"loss": 1.0095,
"step": 1268
},
{
"epoch": 1.8,
"grad_norm": 0.6738303899765015,
"learning_rate": 0.00011007806955287439,
"loss": 0.8957,
"step": 1269
},
{
"epoch": 1.8,
"grad_norm": 0.7134904265403748,
"learning_rate": 0.00011000709723207949,
"loss": 0.9553,
"step": 1270
},
{
"epoch": 1.8,
"grad_norm": 0.6910573840141296,
"learning_rate": 0.0001099361249112846,
"loss": 0.9147,
"step": 1271
},
{
"epoch": 1.8,
"grad_norm": 0.7500548958778381,
"learning_rate": 0.0001098651525904897,
"loss": 1.0135,
"step": 1272
},
{
"epoch": 1.8,
"grad_norm": 0.7269773483276367,
"learning_rate": 0.00010979418026969484,
"loss": 1.0022,
"step": 1273
},
{
"epoch": 1.8,
"grad_norm": 0.7309441566467285,
"learning_rate": 0.00010972320794889994,
"loss": 0.9536,
"step": 1274
},
{
"epoch": 1.81,
"grad_norm": 0.7545249462127686,
"learning_rate": 0.00010965223562810505,
"loss": 0.9793,
"step": 1275
},
{
"epoch": 1.81,
"grad_norm": 0.7531616687774658,
"learning_rate": 0.00010958126330731015,
"loss": 0.8926,
"step": 1276
},
{
"epoch": 1.81,
"grad_norm": 0.7523400187492371,
"learning_rate": 0.00010951029098651527,
"loss": 0.9868,
"step": 1277
},
{
"epoch": 1.81,
"grad_norm": 0.7421838045120239,
"learning_rate": 0.00010943931866572036,
"loss": 0.9743,
"step": 1278
},
{
"epoch": 1.81,
"grad_norm": 0.7826520204544067,
"learning_rate": 0.00010936834634492548,
"loss": 1.042,
"step": 1279
},
{
"epoch": 1.81,
"grad_norm": 0.7460055351257324,
"learning_rate": 0.00010929737402413058,
"loss": 0.9605,
"step": 1280
},
{
"epoch": 1.81,
"grad_norm": 0.723533570766449,
"learning_rate": 0.00010922640170333572,
"loss": 1.0004,
"step": 1281
},
{
"epoch": 1.82,
"grad_norm": 0.7450777888298035,
"learning_rate": 0.00010915542938254082,
"loss": 1.058,
"step": 1282
},
{
"epoch": 1.82,
"grad_norm": 0.7143047451972961,
"learning_rate": 0.00010908445706174593,
"loss": 1.0019,
"step": 1283
},
{
"epoch": 1.82,
"grad_norm": 0.6794545650482178,
"learning_rate": 0.00010901348474095103,
"loss": 0.9604,
"step": 1284
},
{
"epoch": 1.82,
"grad_norm": 0.6825183629989624,
"learning_rate": 0.00010894251242015614,
"loss": 0.935,
"step": 1285
},
{
"epoch": 1.82,
"grad_norm": 0.6939642429351807,
"learning_rate": 0.00010887154009936125,
"loss": 1.0092,
"step": 1286
},
{
"epoch": 1.82,
"grad_norm": 0.7390894293785095,
"learning_rate": 0.00010880056777856635,
"loss": 1.0106,
"step": 1287
},
{
"epoch": 1.82,
"grad_norm": 0.740071177482605,
"learning_rate": 0.00010872959545777148,
"loss": 0.9024,
"step": 1288
},
{
"epoch": 1.83,
"grad_norm": 0.7248451709747314,
"learning_rate": 0.00010865862313697659,
"loss": 0.9534,
"step": 1289
},
{
"epoch": 1.83,
"grad_norm": 0.7937654852867126,
"learning_rate": 0.00010858765081618169,
"loss": 1.022,
"step": 1290
},
{
"epoch": 1.83,
"grad_norm": 0.7495038509368896,
"learning_rate": 0.0001085166784953868,
"loss": 0.8344,
"step": 1291
},
{
"epoch": 1.83,
"grad_norm": 0.7504763603210449,
"learning_rate": 0.00010844570617459192,
"loss": 0.9882,
"step": 1292
},
{
"epoch": 1.83,
"grad_norm": 0.7662994861602783,
"learning_rate": 0.00010837473385379701,
"loss": 0.9283,
"step": 1293
},
{
"epoch": 1.83,
"grad_norm": 0.7336791157722473,
"learning_rate": 0.00010830376153300213,
"loss": 0.9955,
"step": 1294
},
{
"epoch": 1.83,
"grad_norm": 0.7345770597457886,
"learning_rate": 0.00010823278921220725,
"loss": 0.9991,
"step": 1295
},
{
"epoch": 1.84,
"grad_norm": 0.6934396624565125,
"learning_rate": 0.00010816181689141235,
"loss": 0.8826,
"step": 1296
},
{
"epoch": 1.84,
"grad_norm": 0.7034685611724854,
"learning_rate": 0.00010809084457061747,
"loss": 0.9665,
"step": 1297
},
{
"epoch": 1.84,
"grad_norm": 0.6718670725822449,
"learning_rate": 0.00010801987224982258,
"loss": 0.8808,
"step": 1298
},
{
"epoch": 1.84,
"grad_norm": 0.7105706334114075,
"learning_rate": 0.00010794889992902768,
"loss": 0.9399,
"step": 1299
},
{
"epoch": 1.84,
"grad_norm": 0.7107860445976257,
"learning_rate": 0.00010787792760823279,
"loss": 0.9766,
"step": 1300
},
{
"epoch": 1.84,
"grad_norm": 0.7587505578994751,
"learning_rate": 0.00010780695528743789,
"loss": 0.993,
"step": 1301
},
{
"epoch": 1.84,
"grad_norm": 0.7261898517608643,
"learning_rate": 0.00010773598296664303,
"loss": 0.9075,
"step": 1302
},
{
"epoch": 1.85,
"grad_norm": 0.712391197681427,
"learning_rate": 0.00010766501064584813,
"loss": 0.991,
"step": 1303
},
{
"epoch": 1.85,
"grad_norm": 0.7336340546607971,
"learning_rate": 0.00010759403832505324,
"loss": 1.036,
"step": 1304
},
{
"epoch": 1.85,
"grad_norm": 0.7203515768051147,
"learning_rate": 0.00010752306600425834,
"loss": 1.0046,
"step": 1305
},
{
"epoch": 1.85,
"grad_norm": 0.723571240901947,
"learning_rate": 0.00010745209368346345,
"loss": 0.9865,
"step": 1306
},
{
"epoch": 1.85,
"grad_norm": 0.7366999983787537,
"learning_rate": 0.00010738112136266855,
"loss": 0.9563,
"step": 1307
},
{
"epoch": 1.85,
"grad_norm": 0.7270590662956238,
"learning_rate": 0.00010731014904187367,
"loss": 0.9038,
"step": 1308
},
{
"epoch": 1.85,
"grad_norm": 0.7648587226867676,
"learning_rate": 0.00010723917672107879,
"loss": 1.0028,
"step": 1309
},
{
"epoch": 1.86,
"grad_norm": 0.6916332244873047,
"learning_rate": 0.0001071682044002839,
"loss": 0.8921,
"step": 1310
},
{
"epoch": 1.86,
"grad_norm": 0.7208322882652283,
"learning_rate": 0.000107097232079489,
"loss": 0.9974,
"step": 1311
},
{
"epoch": 1.86,
"grad_norm": 0.7530882358551025,
"learning_rate": 0.00010702625975869412,
"loss": 0.9533,
"step": 1312
},
{
"epoch": 1.86,
"grad_norm": 0.7088322639465332,
"learning_rate": 0.00010695528743789922,
"loss": 0.9691,
"step": 1313
},
{
"epoch": 1.86,
"grad_norm": 0.7041926383972168,
"learning_rate": 0.00010688431511710433,
"loss": 0.9699,
"step": 1314
},
{
"epoch": 1.86,
"grad_norm": 0.7026035785675049,
"learning_rate": 0.00010681334279630944,
"loss": 1.0027,
"step": 1315
},
{
"epoch": 1.86,
"grad_norm": 0.7356511354446411,
"learning_rate": 0.00010674237047551457,
"loss": 0.9931,
"step": 1316
},
{
"epoch": 1.87,
"grad_norm": 0.7051964402198792,
"learning_rate": 0.00010667139815471967,
"loss": 0.9606,
"step": 1317
},
{
"epoch": 1.87,
"grad_norm": 0.7253301739692688,
"learning_rate": 0.00010660042583392478,
"loss": 1.0027,
"step": 1318
},
{
"epoch": 1.87,
"grad_norm": 0.7091968059539795,
"learning_rate": 0.00010652945351312988,
"loss": 0.9177,
"step": 1319
},
{
"epoch": 1.87,
"grad_norm": 0.7086256146430969,
"learning_rate": 0.00010645848119233499,
"loss": 0.9631,
"step": 1320
},
{
"epoch": 1.87,
"grad_norm": 0.7075737714767456,
"learning_rate": 0.0001063875088715401,
"loss": 0.9245,
"step": 1321
},
{
"epoch": 1.87,
"grad_norm": 0.733659565448761,
"learning_rate": 0.0001063165365507452,
"loss": 0.9595,
"step": 1322
},
{
"epoch": 1.87,
"grad_norm": 0.7235379815101624,
"learning_rate": 0.00010624556422995033,
"loss": 0.8914,
"step": 1323
},
{
"epoch": 1.88,
"grad_norm": 0.8256986141204834,
"learning_rate": 0.00010617459190915544,
"loss": 1.031,
"step": 1324
},
{
"epoch": 1.88,
"grad_norm": 0.7533237934112549,
"learning_rate": 0.00010610361958836055,
"loss": 0.977,
"step": 1325
},
{
"epoch": 1.88,
"grad_norm": 0.7689191699028015,
"learning_rate": 0.00010603264726756565,
"loss": 0.926,
"step": 1326
},
{
"epoch": 1.88,
"grad_norm": 0.7381359934806824,
"learning_rate": 0.00010596167494677077,
"loss": 0.9478,
"step": 1327
},
{
"epoch": 1.88,
"grad_norm": 0.7638797163963318,
"learning_rate": 0.00010589070262597587,
"loss": 0.9694,
"step": 1328
},
{
"epoch": 1.88,
"grad_norm": 0.7295525074005127,
"learning_rate": 0.00010581973030518098,
"loss": 1.0069,
"step": 1329
},
{
"epoch": 1.88,
"grad_norm": 0.7049389481544495,
"learning_rate": 0.0001057487579843861,
"loss": 0.9564,
"step": 1330
},
{
"epoch": 1.89,
"grad_norm": 0.7222816944122314,
"learning_rate": 0.00010567778566359122,
"loss": 0.9999,
"step": 1331
},
{
"epoch": 1.89,
"grad_norm": 0.7252581715583801,
"learning_rate": 0.00010560681334279632,
"loss": 0.9455,
"step": 1332
},
{
"epoch": 1.89,
"grad_norm": 0.7246282696723938,
"learning_rate": 0.00010553584102200143,
"loss": 0.9012,
"step": 1333
},
{
"epoch": 1.89,
"grad_norm": 0.6909769177436829,
"learning_rate": 0.00010546486870120653,
"loss": 0.9417,
"step": 1334
},
{
"epoch": 1.89,
"grad_norm": 0.7185254693031311,
"learning_rate": 0.00010539389638041164,
"loss": 0.9911,
"step": 1335
},
{
"epoch": 1.89,
"grad_norm": 0.7959221005439758,
"learning_rate": 0.00010532292405961674,
"loss": 0.9653,
"step": 1336
},
{
"epoch": 1.89,
"grad_norm": 0.7081154584884644,
"learning_rate": 0.00010525195173882188,
"loss": 0.8621,
"step": 1337
},
{
"epoch": 1.9,
"grad_norm": 0.7107864618301392,
"learning_rate": 0.00010518097941802698,
"loss": 0.994,
"step": 1338
},
{
"epoch": 1.9,
"grad_norm": 0.7213383316993713,
"learning_rate": 0.00010511000709723209,
"loss": 0.9479,
"step": 1339
},
{
"epoch": 1.9,
"grad_norm": 0.7340273261070251,
"learning_rate": 0.00010503903477643719,
"loss": 0.9278,
"step": 1340
},
{
"epoch": 1.9,
"grad_norm": 0.7713385820388794,
"learning_rate": 0.0001049680624556423,
"loss": 1.0469,
"step": 1341
},
{
"epoch": 1.9,
"grad_norm": 0.6916841268539429,
"learning_rate": 0.0001048970901348474,
"loss": 0.9564,
"step": 1342
},
{
"epoch": 1.9,
"grad_norm": 0.7080590128898621,
"learning_rate": 0.00010482611781405252,
"loss": 0.9239,
"step": 1343
},
{
"epoch": 1.9,
"grad_norm": 0.7093188762664795,
"learning_rate": 0.00010475514549325764,
"loss": 0.9249,
"step": 1344
},
{
"epoch": 1.91,
"grad_norm": 0.7387856841087341,
"learning_rate": 0.00010468417317246275,
"loss": 1.0172,
"step": 1345
},
{
"epoch": 1.91,
"grad_norm": 0.6952479481697083,
"learning_rate": 0.00010461320085166785,
"loss": 0.9636,
"step": 1346
},
{
"epoch": 1.91,
"grad_norm": 0.7166315913200378,
"learning_rate": 0.00010454222853087297,
"loss": 0.9475,
"step": 1347
},
{
"epoch": 1.91,
"grad_norm": 0.7198975682258606,
"learning_rate": 0.00010447125621007807,
"loss": 0.9229,
"step": 1348
},
{
"epoch": 1.91,
"grad_norm": 0.7340227961540222,
"learning_rate": 0.00010440028388928318,
"loss": 1.0101,
"step": 1349
},
{
"epoch": 1.91,
"grad_norm": 0.7560737133026123,
"learning_rate": 0.00010432931156848829,
"loss": 1.0183,
"step": 1350
},
{
"epoch": 1.91,
"grad_norm": 0.7338775396347046,
"learning_rate": 0.00010425833924769342,
"loss": 0.9869,
"step": 1351
},
{
"epoch": 1.92,
"grad_norm": 0.7228880524635315,
"learning_rate": 0.00010418736692689852,
"loss": 0.9771,
"step": 1352
},
{
"epoch": 1.92,
"grad_norm": 0.7613531351089478,
"learning_rate": 0.00010411639460610363,
"loss": 0.9756,
"step": 1353
},
{
"epoch": 1.92,
"grad_norm": 0.7517747282981873,
"learning_rate": 0.00010404542228530874,
"loss": 0.9873,
"step": 1354
},
{
"epoch": 1.92,
"grad_norm": 0.7492972612380981,
"learning_rate": 0.00010397444996451384,
"loss": 1.0389,
"step": 1355
},
{
"epoch": 1.92,
"grad_norm": 0.7404029965400696,
"learning_rate": 0.00010390347764371895,
"loss": 1.0034,
"step": 1356
},
{
"epoch": 1.92,
"grad_norm": 0.6921476125717163,
"learning_rate": 0.00010383250532292405,
"loss": 0.9379,
"step": 1357
},
{
"epoch": 1.92,
"grad_norm": 0.7141453623771667,
"learning_rate": 0.00010376153300212918,
"loss": 0.8902,
"step": 1358
},
{
"epoch": 1.93,
"grad_norm": 0.7601017355918884,
"learning_rate": 0.00010369056068133429,
"loss": 0.9661,
"step": 1359
},
{
"epoch": 1.93,
"grad_norm": 0.7252589464187622,
"learning_rate": 0.0001036195883605394,
"loss": 0.9692,
"step": 1360
},
{
"epoch": 1.93,
"grad_norm": 0.7792155742645264,
"learning_rate": 0.0001035486160397445,
"loss": 0.9614,
"step": 1361
},
{
"epoch": 1.93,
"grad_norm": 0.7573446035385132,
"learning_rate": 0.00010347764371894962,
"loss": 0.8806,
"step": 1362
},
{
"epoch": 1.93,
"grad_norm": 0.7885202169418335,
"learning_rate": 0.00010340667139815472,
"loss": 0.9797,
"step": 1363
},
{
"epoch": 1.93,
"grad_norm": 0.7697455286979675,
"learning_rate": 0.00010333569907735983,
"loss": 0.9303,
"step": 1364
},
{
"epoch": 1.93,
"grad_norm": 0.7955215573310852,
"learning_rate": 0.00010326472675656495,
"loss": 1.0014,
"step": 1365
},
{
"epoch": 1.94,
"grad_norm": 0.7274532318115234,
"learning_rate": 0.00010319375443577007,
"loss": 0.9392,
"step": 1366
},
{
"epoch": 1.94,
"grad_norm": 0.7063309550285339,
"learning_rate": 0.00010312278211497517,
"loss": 0.9254,
"step": 1367
},
{
"epoch": 1.94,
"grad_norm": 0.6960466504096985,
"learning_rate": 0.00010305180979418028,
"loss": 0.9474,
"step": 1368
},
{
"epoch": 1.94,
"grad_norm": 0.6692228317260742,
"learning_rate": 0.00010298083747338538,
"loss": 0.9358,
"step": 1369
},
{
"epoch": 1.94,
"grad_norm": 0.7357027530670166,
"learning_rate": 0.00010290986515259049,
"loss": 1.055,
"step": 1370
},
{
"epoch": 1.94,
"grad_norm": 0.7488313317298889,
"learning_rate": 0.00010283889283179559,
"loss": 0.8772,
"step": 1371
},
{
"epoch": 1.94,
"grad_norm": 0.7428348660469055,
"learning_rate": 0.0001027679205110007,
"loss": 1.0286,
"step": 1372
},
{
"epoch": 1.95,
"grad_norm": 0.7692130208015442,
"learning_rate": 0.00010269694819020583,
"loss": 0.9619,
"step": 1373
},
{
"epoch": 1.95,
"grad_norm": 0.748082160949707,
"learning_rate": 0.00010262597586941094,
"loss": 0.9234,
"step": 1374
},
{
"epoch": 1.95,
"grad_norm": 0.7470769882202148,
"learning_rate": 0.00010255500354861604,
"loss": 0.9904,
"step": 1375
},
{
"epoch": 1.95,
"grad_norm": 0.8034458160400391,
"learning_rate": 0.00010248403122782115,
"loss": 1.0794,
"step": 1376
},
{
"epoch": 1.95,
"grad_norm": 0.7386705875396729,
"learning_rate": 0.00010241305890702627,
"loss": 0.9268,
"step": 1377
},
{
"epoch": 1.95,
"grad_norm": 0.7536996603012085,
"learning_rate": 0.00010234208658623137,
"loss": 1.0428,
"step": 1378
},
{
"epoch": 1.95,
"grad_norm": 0.753163754940033,
"learning_rate": 0.00010227111426543648,
"loss": 1.0112,
"step": 1379
},
{
"epoch": 1.96,
"grad_norm": 0.7234356999397278,
"learning_rate": 0.0001022001419446416,
"loss": 0.9551,
"step": 1380
},
{
"epoch": 1.96,
"grad_norm": 0.7315410375595093,
"learning_rate": 0.0001021291696238467,
"loss": 0.9997,
"step": 1381
},
{
"epoch": 1.96,
"grad_norm": 0.7085781097412109,
"learning_rate": 0.00010205819730305182,
"loss": 0.987,
"step": 1382
},
{
"epoch": 1.96,
"grad_norm": 0.7303948998451233,
"learning_rate": 0.00010198722498225693,
"loss": 0.9763,
"step": 1383
},
{
"epoch": 1.96,
"grad_norm": 0.7143583297729492,
"learning_rate": 0.00010191625266146203,
"loss": 0.8577,
"step": 1384
},
{
"epoch": 1.96,
"grad_norm": 0.7335658073425293,
"learning_rate": 0.00010184528034066714,
"loss": 0.9503,
"step": 1385
},
{
"epoch": 1.96,
"grad_norm": 0.7341468334197998,
"learning_rate": 0.00010177430801987224,
"loss": 0.9232,
"step": 1386
},
{
"epoch": 1.96,
"grad_norm": 0.7755731344223022,
"learning_rate": 0.00010170333569907737,
"loss": 1.0562,
"step": 1387
},
{
"epoch": 1.97,
"grad_norm": 0.7738588452339172,
"learning_rate": 0.00010163236337828248,
"loss": 0.9736,
"step": 1388
},
{
"epoch": 1.97,
"grad_norm": 0.7616381645202637,
"learning_rate": 0.00010156139105748759,
"loss": 0.9838,
"step": 1389
},
{
"epoch": 1.97,
"grad_norm": 0.7650443315505981,
"learning_rate": 0.00010149041873669269,
"loss": 0.9184,
"step": 1390
},
{
"epoch": 1.97,
"grad_norm": 0.7394657135009766,
"learning_rate": 0.0001014194464158978,
"loss": 0.8795,
"step": 1391
},
{
"epoch": 1.97,
"grad_norm": 0.6988821625709534,
"learning_rate": 0.0001013484740951029,
"loss": 0.9545,
"step": 1392
},
{
"epoch": 1.97,
"grad_norm": 0.7445807456970215,
"learning_rate": 0.00010127750177430802,
"loss": 0.9847,
"step": 1393
},
{
"epoch": 1.97,
"grad_norm": 0.7342961430549622,
"learning_rate": 0.00010120652945351314,
"loss": 0.9654,
"step": 1394
},
{
"epoch": 1.98,
"grad_norm": 0.726063072681427,
"learning_rate": 0.00010113555713271826,
"loss": 0.9411,
"step": 1395
},
{
"epoch": 1.98,
"grad_norm": 0.6817072629928589,
"learning_rate": 0.00010106458481192335,
"loss": 0.9266,
"step": 1396
},
{
"epoch": 1.98,
"grad_norm": 0.717892050743103,
"learning_rate": 0.00010099361249112847,
"loss": 0.9439,
"step": 1397
},
{
"epoch": 1.98,
"grad_norm": 0.7288515567779541,
"learning_rate": 0.00010092264017033357,
"loss": 1.0106,
"step": 1398
},
{
"epoch": 1.98,
"grad_norm": 0.7539445757865906,
"learning_rate": 0.00010085166784953868,
"loss": 1.0806,
"step": 1399
},
{
"epoch": 1.98,
"grad_norm": 0.7196104526519775,
"learning_rate": 0.00010078069552874378,
"loss": 0.8989,
"step": 1400
},
{
"epoch": 1.98,
"grad_norm": 0.7768797278404236,
"learning_rate": 0.00010070972320794892,
"loss": 0.8933,
"step": 1401
},
{
"epoch": 1.99,
"grad_norm": 0.7455546855926514,
"learning_rate": 0.00010063875088715402,
"loss": 1.0315,
"step": 1402
},
{
"epoch": 1.99,
"grad_norm": 0.7538041472434998,
"learning_rate": 0.00010056777856635913,
"loss": 1.001,
"step": 1403
},
{
"epoch": 1.99,
"grad_norm": 0.7568172216415405,
"learning_rate": 0.00010049680624556423,
"loss": 1.0073,
"step": 1404
},
{
"epoch": 1.99,
"grad_norm": 0.761991024017334,
"learning_rate": 0.00010042583392476934,
"loss": 0.9716,
"step": 1405
},
{
"epoch": 1.99,
"grad_norm": 0.7252638339996338,
"learning_rate": 0.00010035486160397445,
"loss": 0.9636,
"step": 1406
},
{
"epoch": 1.99,
"grad_norm": 0.7555544972419739,
"learning_rate": 0.00010028388928317955,
"loss": 0.9955,
"step": 1407
},
{
"epoch": 1.99,
"grad_norm": 0.7491613030433655,
"learning_rate": 0.00010021291696238468,
"loss": 1.0634,
"step": 1408
},
{
"epoch": 2.0,
"grad_norm": 0.7551286220550537,
"learning_rate": 0.00010014194464158979,
"loss": 0.9722,
"step": 1409
},
{
"epoch": 2.0,
"grad_norm": 0.7532618045806885,
"learning_rate": 0.00010007097232079489,
"loss": 0.9702,
"step": 1410
},
{
"epoch": 2.0,
"grad_norm": 0.7265384793281555,
"learning_rate": 0.0001,
"loss": 0.9758,
"step": 1411
},
{
"epoch": 2.0,
"grad_norm": 0.6909193992614746,
"learning_rate": 9.992902767920512e-05,
"loss": 0.8897,
"step": 1412
},
{
"epoch": 2.0,
"grad_norm": 0.6946807503700256,
"learning_rate": 9.985805535841023e-05,
"loss": 0.7733,
"step": 1413
},
{
"epoch": 2.0,
"grad_norm": 0.6953454613685608,
"learning_rate": 9.978708303761534e-05,
"loss": 0.7321,
"step": 1414
},
{
"epoch": 2.0,
"grad_norm": 0.7531546950340271,
"learning_rate": 9.971611071682044e-05,
"loss": 0.788,
"step": 1415
},
{
"epoch": 2.01,
"grad_norm": 0.8027557730674744,
"learning_rate": 9.964513839602555e-05,
"loss": 0.6882,
"step": 1416
},
{
"epoch": 2.01,
"grad_norm": 0.9294509291648865,
"learning_rate": 9.957416607523067e-05,
"loss": 0.7098,
"step": 1417
},
{
"epoch": 2.01,
"grad_norm": 1.0754622220993042,
"learning_rate": 9.950319375443578e-05,
"loss": 0.716,
"step": 1418
},
{
"epoch": 2.01,
"grad_norm": 0.9674079418182373,
"learning_rate": 9.943222143364088e-05,
"loss": 0.6679,
"step": 1419
},
{
"epoch": 2.01,
"grad_norm": 0.9014368057250977,
"learning_rate": 9.9361249112846e-05,
"loss": 0.713,
"step": 1420
},
{
"epoch": 2.01,
"grad_norm": 0.8310858607292175,
"learning_rate": 9.92902767920511e-05,
"loss": 0.6925,
"step": 1421
},
{
"epoch": 2.01,
"grad_norm": 0.8001790642738342,
"learning_rate": 9.921930447125622e-05,
"loss": 0.7517,
"step": 1422
},
{
"epoch": 2.02,
"grad_norm": 0.7294822335243225,
"learning_rate": 9.914833215046132e-05,
"loss": 0.6376,
"step": 1423
},
{
"epoch": 2.02,
"grad_norm": 0.7423235177993774,
"learning_rate": 9.907735982966644e-05,
"loss": 0.7022,
"step": 1424
},
{
"epoch": 2.02,
"grad_norm": 0.7563339471817017,
"learning_rate": 9.900638750887154e-05,
"loss": 0.6896,
"step": 1425
},
{
"epoch": 2.02,
"grad_norm": 0.8096082210540771,
"learning_rate": 9.893541518807666e-05,
"loss": 0.6826,
"step": 1426
},
{
"epoch": 2.02,
"grad_norm": 0.8095359802246094,
"learning_rate": 9.886444286728177e-05,
"loss": 0.6946,
"step": 1427
},
{
"epoch": 2.02,
"grad_norm": 0.8249964118003845,
"learning_rate": 9.879347054648688e-05,
"loss": 0.6902,
"step": 1428
},
{
"epoch": 2.02,
"grad_norm": 0.8830640316009521,
"learning_rate": 9.872249822569198e-05,
"loss": 0.6451,
"step": 1429
},
{
"epoch": 2.03,
"grad_norm": 0.9052359461784363,
"learning_rate": 9.865152590489709e-05,
"loss": 0.7589,
"step": 1430
},
{
"epoch": 2.03,
"grad_norm": 0.8446089029312134,
"learning_rate": 9.85805535841022e-05,
"loss": 0.6526,
"step": 1431
},
{
"epoch": 2.03,
"grad_norm": 0.84389728307724,
"learning_rate": 9.850958126330732e-05,
"loss": 0.7144,
"step": 1432
},
{
"epoch": 2.03,
"grad_norm": 0.8531579971313477,
"learning_rate": 9.843860894251242e-05,
"loss": 0.7598,
"step": 1433
},
{
"epoch": 2.03,
"grad_norm": 0.7820523977279663,
"learning_rate": 9.836763662171753e-05,
"loss": 0.666,
"step": 1434
},
{
"epoch": 2.03,
"grad_norm": 0.8026299476623535,
"learning_rate": 9.829666430092264e-05,
"loss": 0.689,
"step": 1435
},
{
"epoch": 2.03,
"grad_norm": 0.8456664681434631,
"learning_rate": 9.822569198012776e-05,
"loss": 0.7466,
"step": 1436
},
{
"epoch": 2.04,
"grad_norm": 0.81344074010849,
"learning_rate": 9.815471965933285e-05,
"loss": 0.6924,
"step": 1437
},
{
"epoch": 2.04,
"grad_norm": 0.8156119585037231,
"learning_rate": 9.808374733853798e-05,
"loss": 0.6812,
"step": 1438
},
{
"epoch": 2.04,
"grad_norm": 0.8709864020347595,
"learning_rate": 9.801277501774308e-05,
"loss": 0.7375,
"step": 1439
},
{
"epoch": 2.04,
"grad_norm": 0.8173854947090149,
"learning_rate": 9.794180269694819e-05,
"loss": 0.7124,
"step": 1440
},
{
"epoch": 2.04,
"grad_norm": 0.8464558124542236,
"learning_rate": 9.78708303761533e-05,
"loss": 0.7316,
"step": 1441
},
{
"epoch": 2.04,
"grad_norm": 0.8382313251495361,
"learning_rate": 9.779985805535842e-05,
"loss": 0.6897,
"step": 1442
},
{
"epoch": 2.04,
"grad_norm": 0.8649709820747375,
"learning_rate": 9.772888573456353e-05,
"loss": 0.7518,
"step": 1443
},
{
"epoch": 2.05,
"grad_norm": 0.7927874326705933,
"learning_rate": 9.765791341376863e-05,
"loss": 0.7071,
"step": 1444
},
{
"epoch": 2.05,
"grad_norm": 0.8561937808990479,
"learning_rate": 9.758694109297374e-05,
"loss": 0.7027,
"step": 1445
},
{
"epoch": 2.05,
"grad_norm": 0.8343231678009033,
"learning_rate": 9.751596877217886e-05,
"loss": 0.7474,
"step": 1446
},
{
"epoch": 2.05,
"grad_norm": 0.8136131763458252,
"learning_rate": 9.744499645138397e-05,
"loss": 0.7358,
"step": 1447
},
{
"epoch": 2.05,
"grad_norm": 0.8063412308692932,
"learning_rate": 9.737402413058907e-05,
"loss": 0.7115,
"step": 1448
},
{
"epoch": 2.05,
"grad_norm": 0.8385120630264282,
"learning_rate": 9.73030518097942e-05,
"loss": 0.7042,
"step": 1449
},
{
"epoch": 2.05,
"grad_norm": 0.8496181964874268,
"learning_rate": 9.723207948899929e-05,
"loss": 0.7378,
"step": 1450
},
{
"epoch": 2.06,
"grad_norm": 0.8625522255897522,
"learning_rate": 9.71611071682044e-05,
"loss": 0.69,
"step": 1451
},
{
"epoch": 2.06,
"grad_norm": 0.8408932089805603,
"learning_rate": 9.709013484740952e-05,
"loss": 0.6602,
"step": 1452
},
{
"epoch": 2.06,
"grad_norm": 0.82053142786026,
"learning_rate": 9.701916252661463e-05,
"loss": 0.6759,
"step": 1453
},
{
"epoch": 2.06,
"grad_norm": 0.8382329344749451,
"learning_rate": 9.694819020581973e-05,
"loss": 0.6741,
"step": 1454
},
{
"epoch": 2.06,
"grad_norm": 0.8781008720397949,
"learning_rate": 9.687721788502484e-05,
"loss": 0.7876,
"step": 1455
},
{
"epoch": 2.06,
"grad_norm": 0.8545266389846802,
"learning_rate": 9.680624556422996e-05,
"loss": 0.7504,
"step": 1456
},
{
"epoch": 2.06,
"grad_norm": 0.8554444909095764,
"learning_rate": 9.673527324343507e-05,
"loss": 0.7148,
"step": 1457
},
{
"epoch": 2.07,
"grad_norm": 0.8741387128829956,
"learning_rate": 9.666430092264017e-05,
"loss": 0.7527,
"step": 1458
},
{
"epoch": 2.07,
"grad_norm": 0.8928270936012268,
"learning_rate": 9.65933286018453e-05,
"loss": 0.7233,
"step": 1459
},
{
"epoch": 2.07,
"grad_norm": 0.8724638223648071,
"learning_rate": 9.652235628105039e-05,
"loss": 0.7014,
"step": 1460
},
{
"epoch": 2.07,
"grad_norm": 0.8374599814414978,
"learning_rate": 9.64513839602555e-05,
"loss": 0.6646,
"step": 1461
},
{
"epoch": 2.07,
"grad_norm": 0.8397142291069031,
"learning_rate": 9.63804116394606e-05,
"loss": 0.6829,
"step": 1462
},
{
"epoch": 2.07,
"grad_norm": 0.7984825372695923,
"learning_rate": 9.630943931866573e-05,
"loss": 0.748,
"step": 1463
},
{
"epoch": 2.07,
"grad_norm": 0.8543975353240967,
"learning_rate": 9.623846699787083e-05,
"loss": 0.6743,
"step": 1464
},
{
"epoch": 2.08,
"grad_norm": 0.8055514693260193,
"learning_rate": 9.616749467707594e-05,
"loss": 0.6725,
"step": 1465
},
{
"epoch": 2.08,
"grad_norm": 0.8549181818962097,
"learning_rate": 9.609652235628106e-05,
"loss": 0.7744,
"step": 1466
},
{
"epoch": 2.08,
"grad_norm": 0.8540986180305481,
"learning_rate": 9.602555003548617e-05,
"loss": 0.779,
"step": 1467
},
{
"epoch": 2.08,
"grad_norm": 0.8481354117393494,
"learning_rate": 9.595457771469127e-05,
"loss": 0.706,
"step": 1468
},
{
"epoch": 2.08,
"grad_norm": 0.8669779896736145,
"learning_rate": 9.588360539389638e-05,
"loss": 0.6772,
"step": 1469
},
{
"epoch": 2.08,
"grad_norm": 0.9160670638084412,
"learning_rate": 9.581263307310149e-05,
"loss": 0.7225,
"step": 1470
},
{
"epoch": 2.08,
"grad_norm": 0.9088666439056396,
"learning_rate": 9.57416607523066e-05,
"loss": 0.7003,
"step": 1471
},
{
"epoch": 2.09,
"grad_norm": 0.906645655632019,
"learning_rate": 9.567068843151172e-05,
"loss": 0.7324,
"step": 1472
},
{
"epoch": 2.09,
"grad_norm": 0.8714525103569031,
"learning_rate": 9.559971611071683e-05,
"loss": 0.7198,
"step": 1473
},
{
"epoch": 2.09,
"grad_norm": 0.8619925379753113,
"learning_rate": 9.552874378992194e-05,
"loss": 0.7415,
"step": 1474
},
{
"epoch": 2.09,
"grad_norm": 0.8662142157554626,
"learning_rate": 9.545777146912704e-05,
"loss": 0.7169,
"step": 1475
},
{
"epoch": 2.09,
"grad_norm": 0.8264342546463013,
"learning_rate": 9.538679914833216e-05,
"loss": 0.7027,
"step": 1476
},
{
"epoch": 2.09,
"grad_norm": 0.849160373210907,
"learning_rate": 9.531582682753727e-05,
"loss": 0.7204,
"step": 1477
},
{
"epoch": 2.09,
"grad_norm": 0.8109767436981201,
"learning_rate": 9.524485450674238e-05,
"loss": 0.7396,
"step": 1478
},
{
"epoch": 2.1,
"grad_norm": 0.8427298069000244,
"learning_rate": 9.517388218594748e-05,
"loss": 0.7111,
"step": 1479
},
{
"epoch": 2.1,
"grad_norm": 0.8257899284362793,
"learning_rate": 9.510290986515259e-05,
"loss": 0.6849,
"step": 1480
},
{
"epoch": 2.1,
"grad_norm": 0.8619416356086731,
"learning_rate": 9.50319375443577e-05,
"loss": 0.6701,
"step": 1481
},
{
"epoch": 2.1,
"grad_norm": 0.9152725338935852,
"learning_rate": 9.496096522356282e-05,
"loss": 0.7793,
"step": 1482
},
{
"epoch": 2.1,
"grad_norm": 0.9197123050689697,
"learning_rate": 9.488999290276792e-05,
"loss": 0.7291,
"step": 1483
},
{
"epoch": 2.1,
"grad_norm": 0.8989003896713257,
"learning_rate": 9.481902058197304e-05,
"loss": 0.733,
"step": 1484
},
{
"epoch": 2.1,
"grad_norm": 0.8624677062034607,
"learning_rate": 9.474804826117814e-05,
"loss": 0.7366,
"step": 1485
},
{
"epoch": 2.11,
"grad_norm": 0.8718342185020447,
"learning_rate": 9.467707594038326e-05,
"loss": 0.7399,
"step": 1486
},
{
"epoch": 2.11,
"grad_norm": 0.868829071521759,
"learning_rate": 9.460610361958836e-05,
"loss": 0.7323,
"step": 1487
},
{
"epoch": 2.11,
"grad_norm": 0.8415536284446716,
"learning_rate": 9.453513129879348e-05,
"loss": 0.712,
"step": 1488
},
{
"epoch": 2.11,
"grad_norm": 0.8465087413787842,
"learning_rate": 9.446415897799858e-05,
"loss": 0.7202,
"step": 1489
},
{
"epoch": 2.11,
"grad_norm": 0.8721869587898254,
"learning_rate": 9.43931866572037e-05,
"loss": 0.7269,
"step": 1490
},
{
"epoch": 2.11,
"grad_norm": 0.8698458671569824,
"learning_rate": 9.43222143364088e-05,
"loss": 0.7442,
"step": 1491
},
{
"epoch": 2.11,
"grad_norm": 0.8857313394546509,
"learning_rate": 9.425124201561392e-05,
"loss": 0.7525,
"step": 1492
},
{
"epoch": 2.12,
"grad_norm": 0.8833426833152771,
"learning_rate": 9.418026969481902e-05,
"loss": 0.7618,
"step": 1493
},
{
"epoch": 2.12,
"grad_norm": 0.8640385866165161,
"learning_rate": 9.410929737402413e-05,
"loss": 0.7094,
"step": 1494
},
{
"epoch": 2.12,
"grad_norm": 0.8465377688407898,
"learning_rate": 9.403832505322924e-05,
"loss": 0.7406,
"step": 1495
},
{
"epoch": 2.12,
"grad_norm": 0.8516689538955688,
"learning_rate": 9.396735273243436e-05,
"loss": 0.7149,
"step": 1496
},
{
"epoch": 2.12,
"grad_norm": 0.8319384455680847,
"learning_rate": 9.389638041163946e-05,
"loss": 0.6586,
"step": 1497
},
{
"epoch": 2.12,
"grad_norm": 0.8527852892875671,
"learning_rate": 9.382540809084458e-05,
"loss": 0.6629,
"step": 1498
},
{
"epoch": 2.12,
"grad_norm": 0.8378131985664368,
"learning_rate": 9.375443577004968e-05,
"loss": 0.704,
"step": 1499
},
{
"epoch": 2.13,
"grad_norm": 0.8492695689201355,
"learning_rate": 9.36834634492548e-05,
"loss": 0.6833,
"step": 1500
},
{
"epoch": 2.13,
"grad_norm": 0.8611241579055786,
"learning_rate": 9.36124911284599e-05,
"loss": 0.6794,
"step": 1501
},
{
"epoch": 2.13,
"grad_norm": 0.914234459400177,
"learning_rate": 9.354151880766502e-05,
"loss": 0.7751,
"step": 1502
},
{
"epoch": 2.13,
"grad_norm": 0.8970404863357544,
"learning_rate": 9.347054648687013e-05,
"loss": 0.7603,
"step": 1503
},
{
"epoch": 2.13,
"grad_norm": 0.8614270687103271,
"learning_rate": 9.339957416607523e-05,
"loss": 0.7352,
"step": 1504
},
{
"epoch": 2.13,
"grad_norm": 0.8698607683181763,
"learning_rate": 9.332860184528034e-05,
"loss": 0.7116,
"step": 1505
},
{
"epoch": 2.13,
"grad_norm": 0.8466779589653015,
"learning_rate": 9.325762952448546e-05,
"loss": 0.695,
"step": 1506
},
{
"epoch": 2.13,
"grad_norm": 0.8445769548416138,
"learning_rate": 9.318665720369057e-05,
"loss": 0.7525,
"step": 1507
},
{
"epoch": 2.14,
"grad_norm": 0.8695936799049377,
"learning_rate": 9.311568488289567e-05,
"loss": 0.7352,
"step": 1508
},
{
"epoch": 2.14,
"grad_norm": 0.8878143429756165,
"learning_rate": 9.30447125621008e-05,
"loss": 0.713,
"step": 1509
},
{
"epoch": 2.14,
"grad_norm": 0.8836119174957275,
"learning_rate": 9.29737402413059e-05,
"loss": 0.7132,
"step": 1510
},
{
"epoch": 2.14,
"grad_norm": 0.8474823236465454,
"learning_rate": 9.2902767920511e-05,
"loss": 0.6765,
"step": 1511
},
{
"epoch": 2.14,
"grad_norm": 0.8740355968475342,
"learning_rate": 9.283179559971612e-05,
"loss": 0.7044,
"step": 1512
},
{
"epoch": 2.14,
"grad_norm": 0.8775798082351685,
"learning_rate": 9.276082327892123e-05,
"loss": 0.7338,
"step": 1513
},
{
"epoch": 2.14,
"grad_norm": 0.8951154351234436,
"learning_rate": 9.268985095812633e-05,
"loss": 0.723,
"step": 1514
},
{
"epoch": 2.15,
"grad_norm": 0.891923189163208,
"learning_rate": 9.261887863733144e-05,
"loss": 0.7056,
"step": 1515
},
{
"epoch": 2.15,
"grad_norm": 0.8443423509597778,
"learning_rate": 9.254790631653656e-05,
"loss": 0.6884,
"step": 1516
},
{
"epoch": 2.15,
"grad_norm": 0.8721277713775635,
"learning_rate": 9.247693399574167e-05,
"loss": 0.6926,
"step": 1517
},
{
"epoch": 2.15,
"grad_norm": 0.9046376943588257,
"learning_rate": 9.240596167494677e-05,
"loss": 0.7471,
"step": 1518
},
{
"epoch": 2.15,
"grad_norm": 0.8587860465049744,
"learning_rate": 9.23349893541519e-05,
"loss": 0.726,
"step": 1519
},
{
"epoch": 2.15,
"grad_norm": 0.8803458213806152,
"learning_rate": 9.2264017033357e-05,
"loss": 0.7576,
"step": 1520
},
{
"epoch": 2.15,
"grad_norm": 0.8257414102554321,
"learning_rate": 9.21930447125621e-05,
"loss": 0.6852,
"step": 1521
},
{
"epoch": 2.16,
"grad_norm": 0.8487634658813477,
"learning_rate": 9.21220723917672e-05,
"loss": 0.7674,
"step": 1522
},
{
"epoch": 2.16,
"grad_norm": 0.8407074213027954,
"learning_rate": 9.205110007097233e-05,
"loss": 0.6811,
"step": 1523
},
{
"epoch": 2.16,
"grad_norm": 0.8839500546455383,
"learning_rate": 9.198012775017743e-05,
"loss": 0.7953,
"step": 1524
},
{
"epoch": 2.16,
"grad_norm": 0.875453531742096,
"learning_rate": 9.190915542938254e-05,
"loss": 0.7141,
"step": 1525
},
{
"epoch": 2.16,
"grad_norm": 0.8481379747390747,
"learning_rate": 9.183818310858766e-05,
"loss": 0.6424,
"step": 1526
},
{
"epoch": 2.16,
"grad_norm": 0.8793319463729858,
"learning_rate": 9.176721078779277e-05,
"loss": 0.6961,
"step": 1527
},
{
"epoch": 2.16,
"grad_norm": 0.8625543117523193,
"learning_rate": 9.169623846699787e-05,
"loss": 0.7493,
"step": 1528
},
{
"epoch": 2.17,
"grad_norm": 0.8540360927581787,
"learning_rate": 9.162526614620298e-05,
"loss": 0.7261,
"step": 1529
},
{
"epoch": 2.17,
"grad_norm": 0.8655250072479248,
"learning_rate": 9.15542938254081e-05,
"loss": 0.7124,
"step": 1530
},
{
"epoch": 2.17,
"grad_norm": 0.8972883224487305,
"learning_rate": 9.14833215046132e-05,
"loss": 0.7118,
"step": 1531
},
{
"epoch": 2.17,
"grad_norm": 0.8736087679862976,
"learning_rate": 9.141234918381832e-05,
"loss": 0.7169,
"step": 1532
},
{
"epoch": 2.17,
"grad_norm": 0.8996816873550415,
"learning_rate": 9.134137686302342e-05,
"loss": 0.7213,
"step": 1533
},
{
"epoch": 2.17,
"grad_norm": 0.8447397351264954,
"learning_rate": 9.127040454222854e-05,
"loss": 0.7168,
"step": 1534
},
{
"epoch": 2.17,
"grad_norm": 0.837806224822998,
"learning_rate": 9.119943222143364e-05,
"loss": 0.71,
"step": 1535
},
{
"epoch": 2.18,
"grad_norm": 0.8344355821609497,
"learning_rate": 9.112845990063876e-05,
"loss": 0.6721,
"step": 1536
},
{
"epoch": 2.18,
"grad_norm": 0.8420239090919495,
"learning_rate": 9.105748757984387e-05,
"loss": 0.6764,
"step": 1537
},
{
"epoch": 2.18,
"grad_norm": 0.8825182914733887,
"learning_rate": 9.098651525904898e-05,
"loss": 0.709,
"step": 1538
},
{
"epoch": 2.18,
"grad_norm": 0.9009390473365784,
"learning_rate": 9.091554293825408e-05,
"loss": 0.7687,
"step": 1539
},
{
"epoch": 2.18,
"grad_norm": 0.854705810546875,
"learning_rate": 9.08445706174592e-05,
"loss": 0.7118,
"step": 1540
},
{
"epoch": 2.18,
"grad_norm": 0.9011061191558838,
"learning_rate": 9.07735982966643e-05,
"loss": 0.716,
"step": 1541
},
{
"epoch": 2.18,
"grad_norm": 0.8979266285896301,
"learning_rate": 9.070262597586942e-05,
"loss": 0.7841,
"step": 1542
},
{
"epoch": 2.19,
"grad_norm": 0.9013125896453857,
"learning_rate": 9.063165365507452e-05,
"loss": 0.7402,
"step": 1543
},
{
"epoch": 2.19,
"grad_norm": 0.8848702907562256,
"learning_rate": 9.056068133427964e-05,
"loss": 0.7384,
"step": 1544
},
{
"epoch": 2.19,
"grad_norm": 0.9167879819869995,
"learning_rate": 9.048970901348474e-05,
"loss": 0.8391,
"step": 1545
},
{
"epoch": 2.19,
"grad_norm": 0.8656322360038757,
"learning_rate": 9.041873669268986e-05,
"loss": 0.7492,
"step": 1546
},
{
"epoch": 2.19,
"grad_norm": 0.842842698097229,
"learning_rate": 9.034776437189496e-05,
"loss": 0.7487,
"step": 1547
},
{
"epoch": 2.19,
"grad_norm": 0.8449062705039978,
"learning_rate": 9.027679205110008e-05,
"loss": 0.761,
"step": 1548
},
{
"epoch": 2.19,
"grad_norm": 0.8444206118583679,
"learning_rate": 9.020581973030518e-05,
"loss": 0.6761,
"step": 1549
},
{
"epoch": 2.2,
"grad_norm": 0.8564352989196777,
"learning_rate": 9.01348474095103e-05,
"loss": 0.693,
"step": 1550
},
{
"epoch": 2.2,
"grad_norm": 0.8907448053359985,
"learning_rate": 9.006387508871541e-05,
"loss": 0.7221,
"step": 1551
},
{
"epoch": 2.2,
"grad_norm": 0.8845468163490295,
"learning_rate": 8.999290276792052e-05,
"loss": 0.7229,
"step": 1552
},
{
"epoch": 2.2,
"grad_norm": 0.8686636090278625,
"learning_rate": 8.992193044712562e-05,
"loss": 0.6997,
"step": 1553
},
{
"epoch": 2.2,
"grad_norm": 0.9412215948104858,
"learning_rate": 8.985095812633073e-05,
"loss": 0.7125,
"step": 1554
},
{
"epoch": 2.2,
"grad_norm": 0.8855116963386536,
"learning_rate": 8.977998580553584e-05,
"loss": 0.7206,
"step": 1555
},
{
"epoch": 2.2,
"grad_norm": 0.8830530643463135,
"learning_rate": 8.970901348474096e-05,
"loss": 0.6728,
"step": 1556
},
{
"epoch": 2.21,
"grad_norm": 0.892914891242981,
"learning_rate": 8.963804116394606e-05,
"loss": 0.6458,
"step": 1557
},
{
"epoch": 2.21,
"grad_norm": 0.8819034099578857,
"learning_rate": 8.956706884315118e-05,
"loss": 0.7155,
"step": 1558
},
{
"epoch": 2.21,
"grad_norm": 0.8601769208908081,
"learning_rate": 8.949609652235628e-05,
"loss": 0.7252,
"step": 1559
},
{
"epoch": 2.21,
"grad_norm": 0.8619159460067749,
"learning_rate": 8.94251242015614e-05,
"loss": 0.7115,
"step": 1560
},
{
"epoch": 2.21,
"grad_norm": 0.8631980419158936,
"learning_rate": 8.935415188076651e-05,
"loss": 0.7112,
"step": 1561
},
{
"epoch": 2.21,
"grad_norm": 0.888449490070343,
"learning_rate": 8.928317955997162e-05,
"loss": 0.7384,
"step": 1562
},
{
"epoch": 2.21,
"grad_norm": 0.8786005973815918,
"learning_rate": 8.921220723917673e-05,
"loss": 0.6962,
"step": 1563
},
{
"epoch": 2.22,
"grad_norm": 0.891455352306366,
"learning_rate": 8.914123491838183e-05,
"loss": 0.7471,
"step": 1564
},
{
"epoch": 2.22,
"grad_norm": 0.8893013596534729,
"learning_rate": 8.907026259758694e-05,
"loss": 0.7321,
"step": 1565
},
{
"epoch": 2.22,
"grad_norm": 0.8937901854515076,
"learning_rate": 8.899929027679206e-05,
"loss": 0.712,
"step": 1566
},
{
"epoch": 2.22,
"grad_norm": 0.9010066986083984,
"learning_rate": 8.892831795599717e-05,
"loss": 0.742,
"step": 1567
},
{
"epoch": 2.22,
"grad_norm": 0.8523016571998596,
"learning_rate": 8.885734563520227e-05,
"loss": 0.6838,
"step": 1568
},
{
"epoch": 2.22,
"grad_norm": 0.8775461912155151,
"learning_rate": 8.87863733144074e-05,
"loss": 0.7036,
"step": 1569
},
{
"epoch": 2.22,
"grad_norm": 0.8605543375015259,
"learning_rate": 8.87154009936125e-05,
"loss": 0.6835,
"step": 1570
},
{
"epoch": 2.23,
"grad_norm": 0.8719514012336731,
"learning_rate": 8.864442867281761e-05,
"loss": 0.747,
"step": 1571
},
{
"epoch": 2.23,
"grad_norm": 0.8636995553970337,
"learning_rate": 8.85734563520227e-05,
"loss": 0.6874,
"step": 1572
},
{
"epoch": 2.23,
"grad_norm": 0.856977641582489,
"learning_rate": 8.850248403122783e-05,
"loss": 0.7234,
"step": 1573
},
{
"epoch": 2.23,
"grad_norm": 0.8813896775245667,
"learning_rate": 8.843151171043293e-05,
"loss": 0.6992,
"step": 1574
},
{
"epoch": 2.23,
"grad_norm": 0.8693534135818481,
"learning_rate": 8.836053938963804e-05,
"loss": 0.7058,
"step": 1575
},
{
"epoch": 2.23,
"grad_norm": 0.8914763331413269,
"learning_rate": 8.828956706884316e-05,
"loss": 0.7247,
"step": 1576
},
{
"epoch": 2.23,
"grad_norm": 0.8628402948379517,
"learning_rate": 8.821859474804827e-05,
"loss": 0.7046,
"step": 1577
},
{
"epoch": 2.24,
"grad_norm": 0.8618187308311462,
"learning_rate": 8.814762242725337e-05,
"loss": 0.7106,
"step": 1578
},
{
"epoch": 2.24,
"grad_norm": 0.8622534871101379,
"learning_rate": 8.807665010645848e-05,
"loss": 0.674,
"step": 1579
},
{
"epoch": 2.24,
"grad_norm": 0.8331353068351746,
"learning_rate": 8.80056777856636e-05,
"loss": 0.6985,
"step": 1580
},
{
"epoch": 2.24,
"grad_norm": 0.8842141032218933,
"learning_rate": 8.793470546486871e-05,
"loss": 0.7563,
"step": 1581
},
{
"epoch": 2.24,
"grad_norm": 0.8404679894447327,
"learning_rate": 8.78637331440738e-05,
"loss": 0.6946,
"step": 1582
},
{
"epoch": 2.24,
"grad_norm": 0.8643913269042969,
"learning_rate": 8.779276082327893e-05,
"loss": 0.6439,
"step": 1583
},
{
"epoch": 2.24,
"grad_norm": 0.8534083366394043,
"learning_rate": 8.772178850248403e-05,
"loss": 0.6621,
"step": 1584
},
{
"epoch": 2.25,
"grad_norm": 0.9061523079872131,
"learning_rate": 8.765081618168914e-05,
"loss": 0.7496,
"step": 1585
},
{
"epoch": 2.25,
"grad_norm": 0.881190299987793,
"learning_rate": 8.757984386089424e-05,
"loss": 0.7319,
"step": 1586
},
{
"epoch": 2.25,
"grad_norm": 0.8903520107269287,
"learning_rate": 8.750887154009937e-05,
"loss": 0.7292,
"step": 1587
},
{
"epoch": 2.25,
"grad_norm": 0.8949286341667175,
"learning_rate": 8.743789921930447e-05,
"loss": 0.7834,
"step": 1588
},
{
"epoch": 2.25,
"grad_norm": 0.8609988689422607,
"learning_rate": 8.736692689850958e-05,
"loss": 0.7096,
"step": 1589
},
{
"epoch": 2.25,
"grad_norm": 0.8585081100463867,
"learning_rate": 8.72959545777147e-05,
"loss": 0.7309,
"step": 1590
},
{
"epoch": 2.25,
"grad_norm": 0.8561607599258423,
"learning_rate": 8.722498225691981e-05,
"loss": 0.7206,
"step": 1591
},
{
"epoch": 2.26,
"grad_norm": 0.8617041707038879,
"learning_rate": 8.715400993612492e-05,
"loss": 0.7539,
"step": 1592
},
{
"epoch": 2.26,
"grad_norm": 0.8783171772956848,
"learning_rate": 8.708303761533002e-05,
"loss": 0.6923,
"step": 1593
},
{
"epoch": 2.26,
"grad_norm": 0.8528505563735962,
"learning_rate": 8.701206529453513e-05,
"loss": 0.7053,
"step": 1594
},
{
"epoch": 2.26,
"grad_norm": 0.8965705633163452,
"learning_rate": 8.694109297374024e-05,
"loss": 0.7004,
"step": 1595
},
{
"epoch": 2.26,
"grad_norm": 0.8787754774093628,
"learning_rate": 8.687012065294536e-05,
"loss": 0.7189,
"step": 1596
},
{
"epoch": 2.26,
"grad_norm": 0.9146087169647217,
"learning_rate": 8.679914833215047e-05,
"loss": 0.6916,
"step": 1597
},
{
"epoch": 2.26,
"grad_norm": 0.931847870349884,
"learning_rate": 8.672817601135558e-05,
"loss": 0.7567,
"step": 1598
},
{
"epoch": 2.27,
"grad_norm": 0.9045712351799011,
"learning_rate": 8.665720369056068e-05,
"loss": 0.755,
"step": 1599
},
{
"epoch": 2.27,
"grad_norm": 0.8855098485946655,
"learning_rate": 8.65862313697658e-05,
"loss": 0.7277,
"step": 1600
},
{
"epoch": 2.27,
"grad_norm": 0.8935732245445251,
"learning_rate": 8.651525904897091e-05,
"loss": 0.7153,
"step": 1601
},
{
"epoch": 2.27,
"grad_norm": 0.9022087454795837,
"learning_rate": 8.644428672817602e-05,
"loss": 0.735,
"step": 1602
},
{
"epoch": 2.27,
"grad_norm": 0.8693438172340393,
"learning_rate": 8.637331440738112e-05,
"loss": 0.7232,
"step": 1603
},
{
"epoch": 2.27,
"grad_norm": 0.85861736536026,
"learning_rate": 8.630234208658625e-05,
"loss": 0.7183,
"step": 1604
},
{
"epoch": 2.27,
"grad_norm": 0.8743335604667664,
"learning_rate": 8.623136976579135e-05,
"loss": 0.7973,
"step": 1605
},
{
"epoch": 2.28,
"grad_norm": 0.8646829128265381,
"learning_rate": 8.616039744499646e-05,
"loss": 0.7171,
"step": 1606
},
{
"epoch": 2.28,
"grad_norm": 0.8387510180473328,
"learning_rate": 8.608942512420156e-05,
"loss": 0.7389,
"step": 1607
},
{
"epoch": 2.28,
"grad_norm": 0.8693937063217163,
"learning_rate": 8.601845280340668e-05,
"loss": 0.7036,
"step": 1608
},
{
"epoch": 2.28,
"grad_norm": 0.8225006461143494,
"learning_rate": 8.594748048261178e-05,
"loss": 0.7298,
"step": 1609
},
{
"epoch": 2.28,
"grad_norm": 0.9259347319602966,
"learning_rate": 8.58765081618169e-05,
"loss": 0.7549,
"step": 1610
},
{
"epoch": 2.28,
"grad_norm": 0.9134079813957214,
"learning_rate": 8.5805535841022e-05,
"loss": 0.7127,
"step": 1611
},
{
"epoch": 2.28,
"grad_norm": 0.9061034917831421,
"learning_rate": 8.573456352022712e-05,
"loss": 0.7162,
"step": 1612
},
{
"epoch": 2.29,
"grad_norm": 0.9124405384063721,
"learning_rate": 8.566359119943222e-05,
"loss": 0.7308,
"step": 1613
},
{
"epoch": 2.29,
"grad_norm": 0.8995393514633179,
"learning_rate": 8.559261887863733e-05,
"loss": 0.7131,
"step": 1614
},
{
"epoch": 2.29,
"grad_norm": 0.9184269905090332,
"learning_rate": 8.552164655784245e-05,
"loss": 0.7235,
"step": 1615
},
{
"epoch": 2.29,
"grad_norm": 0.8481336832046509,
"learning_rate": 8.545067423704756e-05,
"loss": 0.6814,
"step": 1616
},
{
"epoch": 2.29,
"grad_norm": 0.9017961025238037,
"learning_rate": 8.537970191625266e-05,
"loss": 0.6856,
"step": 1617
},
{
"epoch": 2.29,
"grad_norm": 0.8780509829521179,
"learning_rate": 8.530872959545777e-05,
"loss": 0.7255,
"step": 1618
},
{
"epoch": 2.29,
"grad_norm": 0.8859030604362488,
"learning_rate": 8.523775727466288e-05,
"loss": 0.7343,
"step": 1619
},
{
"epoch": 2.3,
"grad_norm": 0.8603264689445496,
"learning_rate": 8.5166784953868e-05,
"loss": 0.7088,
"step": 1620
},
{
"epoch": 2.3,
"grad_norm": 0.8859502077102661,
"learning_rate": 8.509581263307311e-05,
"loss": 0.7264,
"step": 1621
},
{
"epoch": 2.3,
"grad_norm": 0.8135156631469727,
"learning_rate": 8.502484031227822e-05,
"loss": 0.6724,
"step": 1622
},
{
"epoch": 2.3,
"grad_norm": 0.8489874005317688,
"learning_rate": 8.495386799148333e-05,
"loss": 0.6948,
"step": 1623
},
{
"epoch": 2.3,
"grad_norm": 0.8707240223884583,
"learning_rate": 8.488289567068843e-05,
"loss": 0.742,
"step": 1624
},
{
"epoch": 2.3,
"grad_norm": 0.8624871373176575,
"learning_rate": 8.481192334989355e-05,
"loss": 0.6816,
"step": 1625
},
{
"epoch": 2.3,
"grad_norm": 0.8828703165054321,
"learning_rate": 8.474095102909866e-05,
"loss": 0.7385,
"step": 1626
},
{
"epoch": 2.3,
"grad_norm": 0.8730260133743286,
"learning_rate": 8.466997870830377e-05,
"loss": 0.7126,
"step": 1627
},
{
"epoch": 2.31,
"grad_norm": 0.8839560747146606,
"learning_rate": 8.459900638750887e-05,
"loss": 0.7272,
"step": 1628
},
{
"epoch": 2.31,
"grad_norm": 0.8795418739318848,
"learning_rate": 8.4528034066714e-05,
"loss": 0.6953,
"step": 1629
},
{
"epoch": 2.31,
"grad_norm": 0.8948290348052979,
"learning_rate": 8.44570617459191e-05,
"loss": 0.7241,
"step": 1630
},
{
"epoch": 2.31,
"grad_norm": 0.8926292061805725,
"learning_rate": 8.438608942512421e-05,
"loss": 0.6998,
"step": 1631
},
{
"epoch": 2.31,
"grad_norm": 0.9082595705986023,
"learning_rate": 8.431511710432931e-05,
"loss": 0.7715,
"step": 1632
},
{
"epoch": 2.31,
"grad_norm": 0.9164185523986816,
"learning_rate": 8.424414478353443e-05,
"loss": 0.7412,
"step": 1633
},
{
"epoch": 2.31,
"grad_norm": 0.8599331378936768,
"learning_rate": 8.417317246273953e-05,
"loss": 0.7327,
"step": 1634
},
{
"epoch": 2.32,
"grad_norm": 0.8912233114242554,
"learning_rate": 8.410220014194465e-05,
"loss": 0.7073,
"step": 1635
},
{
"epoch": 2.32,
"grad_norm": 0.878819465637207,
"learning_rate": 8.403122782114976e-05,
"loss": 0.6973,
"step": 1636
},
{
"epoch": 2.32,
"grad_norm": 0.8971090912818909,
"learning_rate": 8.396025550035487e-05,
"loss": 0.7356,
"step": 1637
},
{
"epoch": 2.32,
"grad_norm": 0.9173135757446289,
"learning_rate": 8.388928317955997e-05,
"loss": 0.7359,
"step": 1638
},
{
"epoch": 2.32,
"grad_norm": 0.8811411261558533,
"learning_rate": 8.381831085876508e-05,
"loss": 0.6925,
"step": 1639
},
{
"epoch": 2.32,
"grad_norm": 0.8744279146194458,
"learning_rate": 8.37473385379702e-05,
"loss": 0.6386,
"step": 1640
},
{
"epoch": 2.32,
"grad_norm": 0.8834649324417114,
"learning_rate": 8.367636621717531e-05,
"loss": 0.748,
"step": 1641
},
{
"epoch": 2.33,
"grad_norm": 0.840594470500946,
"learning_rate": 8.360539389638041e-05,
"loss": 0.6875,
"step": 1642
},
{
"epoch": 2.33,
"grad_norm": 0.896140992641449,
"learning_rate": 8.353442157558553e-05,
"loss": 0.7505,
"step": 1643
},
{
"epoch": 2.33,
"grad_norm": 0.8742374181747437,
"learning_rate": 8.346344925479063e-05,
"loss": 0.7154,
"step": 1644
},
{
"epoch": 2.33,
"grad_norm": 0.8660913705825806,
"learning_rate": 8.339247693399575e-05,
"loss": 0.7332,
"step": 1645
},
{
"epoch": 2.33,
"grad_norm": 0.8654317855834961,
"learning_rate": 8.332150461320084e-05,
"loss": 0.7588,
"step": 1646
},
{
"epoch": 2.33,
"grad_norm": 0.8597747087478638,
"learning_rate": 8.325053229240597e-05,
"loss": 0.7629,
"step": 1647
},
{
"epoch": 2.33,
"grad_norm": 0.9062843322753906,
"learning_rate": 8.317955997161107e-05,
"loss": 0.8117,
"step": 1648
},
{
"epoch": 2.34,
"grad_norm": 0.8763964176177979,
"learning_rate": 8.310858765081618e-05,
"loss": 0.6865,
"step": 1649
},
{
"epoch": 2.34,
"grad_norm": 0.8947573304176331,
"learning_rate": 8.30376153300213e-05,
"loss": 0.7502,
"step": 1650
},
{
"epoch": 2.34,
"grad_norm": 0.9027217030525208,
"learning_rate": 8.296664300922641e-05,
"loss": 0.7362,
"step": 1651
},
{
"epoch": 2.34,
"grad_norm": 0.9003787040710449,
"learning_rate": 8.289567068843152e-05,
"loss": 0.7266,
"step": 1652
},
{
"epoch": 2.34,
"grad_norm": 0.8940318822860718,
"learning_rate": 8.282469836763662e-05,
"loss": 0.6746,
"step": 1653
},
{
"epoch": 2.34,
"grad_norm": 0.9138883352279663,
"learning_rate": 8.275372604684173e-05,
"loss": 0.7553,
"step": 1654
},
{
"epoch": 2.34,
"grad_norm": 0.8982981443405151,
"learning_rate": 8.268275372604685e-05,
"loss": 0.7519,
"step": 1655
},
{
"epoch": 2.35,
"grad_norm": 0.8930261731147766,
"learning_rate": 8.261178140525196e-05,
"loss": 0.7216,
"step": 1656
},
{
"epoch": 2.35,
"grad_norm": 0.8809303045272827,
"learning_rate": 8.254080908445706e-05,
"loss": 0.7515,
"step": 1657
},
{
"epoch": 2.35,
"grad_norm": 0.8962745070457458,
"learning_rate": 8.246983676366218e-05,
"loss": 0.7273,
"step": 1658
},
{
"epoch": 2.35,
"grad_norm": 0.8883169889450073,
"learning_rate": 8.239886444286728e-05,
"loss": 0.7156,
"step": 1659
},
{
"epoch": 2.35,
"grad_norm": 0.9205072522163391,
"learning_rate": 8.23278921220724e-05,
"loss": 0.7682,
"step": 1660
},
{
"epoch": 2.35,
"grad_norm": 0.8944506645202637,
"learning_rate": 8.225691980127751e-05,
"loss": 0.7306,
"step": 1661
},
{
"epoch": 2.35,
"grad_norm": 0.866554319858551,
"learning_rate": 8.218594748048262e-05,
"loss": 0.6766,
"step": 1662
},
{
"epoch": 2.36,
"grad_norm": 0.8790395855903625,
"learning_rate": 8.211497515968772e-05,
"loss": 0.7525,
"step": 1663
},
{
"epoch": 2.36,
"grad_norm": 0.8888643980026245,
"learning_rate": 8.204400283889283e-05,
"loss": 0.7373,
"step": 1664
},
{
"epoch": 2.36,
"grad_norm": 0.9181011915206909,
"learning_rate": 8.197303051809795e-05,
"loss": 0.7151,
"step": 1665
},
{
"epoch": 2.36,
"grad_norm": 0.8800945281982422,
"learning_rate": 8.190205819730306e-05,
"loss": 0.7438,
"step": 1666
},
{
"epoch": 2.36,
"grad_norm": 0.9163196086883545,
"learning_rate": 8.183108587650816e-05,
"loss": 0.7177,
"step": 1667
},
{
"epoch": 2.36,
"grad_norm": 0.8852064609527588,
"learning_rate": 8.176011355571328e-05,
"loss": 0.7321,
"step": 1668
},
{
"epoch": 2.36,
"grad_norm": 0.8783362507820129,
"learning_rate": 8.168914123491838e-05,
"loss": 0.7223,
"step": 1669
},
{
"epoch": 2.37,
"grad_norm": 0.8974859714508057,
"learning_rate": 8.16181689141235e-05,
"loss": 0.6787,
"step": 1670
},
{
"epoch": 2.37,
"grad_norm": 0.9229791760444641,
"learning_rate": 8.15471965933286e-05,
"loss": 0.76,
"step": 1671
},
{
"epoch": 2.37,
"grad_norm": 0.8831940293312073,
"learning_rate": 8.147622427253372e-05,
"loss": 0.6959,
"step": 1672
},
{
"epoch": 2.37,
"grad_norm": 0.9193075895309448,
"learning_rate": 8.140525195173882e-05,
"loss": 0.6838,
"step": 1673
},
{
"epoch": 2.37,
"grad_norm": 0.9039551615715027,
"learning_rate": 8.133427963094393e-05,
"loss": 0.7676,
"step": 1674
},
{
"epoch": 2.37,
"grad_norm": 0.8817799091339111,
"learning_rate": 8.126330731014905e-05,
"loss": 0.7223,
"step": 1675
},
{
"epoch": 2.37,
"grad_norm": 0.8838679194450378,
"learning_rate": 8.119233498935416e-05,
"loss": 0.7361,
"step": 1676
},
{
"epoch": 2.38,
"grad_norm": 0.9286802411079407,
"learning_rate": 8.112136266855926e-05,
"loss": 0.8229,
"step": 1677
},
{
"epoch": 2.38,
"grad_norm": 0.877018392086029,
"learning_rate": 8.105039034776437e-05,
"loss": 0.7307,
"step": 1678
},
{
"epoch": 2.38,
"grad_norm": 0.8911111950874329,
"learning_rate": 8.097941802696948e-05,
"loss": 0.7706,
"step": 1679
},
{
"epoch": 2.38,
"grad_norm": 0.8774330615997314,
"learning_rate": 8.09084457061746e-05,
"loss": 0.7347,
"step": 1680
},
{
"epoch": 2.38,
"grad_norm": 0.8946451544761658,
"learning_rate": 8.083747338537971e-05,
"loss": 0.7148,
"step": 1681
},
{
"epoch": 2.38,
"grad_norm": 0.9081972241401672,
"learning_rate": 8.076650106458482e-05,
"loss": 0.7809,
"step": 1682
},
{
"epoch": 2.38,
"grad_norm": 0.8362038731575012,
"learning_rate": 8.069552874378993e-05,
"loss": 0.7048,
"step": 1683
},
{
"epoch": 2.39,
"grad_norm": 0.8865955471992493,
"learning_rate": 8.062455642299503e-05,
"loss": 0.7437,
"step": 1684
},
{
"epoch": 2.39,
"grad_norm": 0.9173011779785156,
"learning_rate": 8.055358410220015e-05,
"loss": 0.7158,
"step": 1685
},
{
"epoch": 2.39,
"grad_norm": 0.8824982047080994,
"learning_rate": 8.048261178140526e-05,
"loss": 0.7485,
"step": 1686
},
{
"epoch": 2.39,
"grad_norm": 0.8951696753501892,
"learning_rate": 8.041163946061037e-05,
"loss": 0.7133,
"step": 1687
},
{
"epoch": 2.39,
"grad_norm": 0.8572990298271179,
"learning_rate": 8.034066713981547e-05,
"loss": 0.7082,
"step": 1688
},
{
"epoch": 2.39,
"grad_norm": 0.8780161738395691,
"learning_rate": 8.02696948190206e-05,
"loss": 0.6906,
"step": 1689
},
{
"epoch": 2.39,
"grad_norm": 0.8812801241874695,
"learning_rate": 8.01987224982257e-05,
"loss": 0.7179,
"step": 1690
},
{
"epoch": 2.4,
"grad_norm": 0.8629257082939148,
"learning_rate": 8.012775017743081e-05,
"loss": 0.6859,
"step": 1691
},
{
"epoch": 2.4,
"grad_norm": 0.8868771195411682,
"learning_rate": 8.005677785663591e-05,
"loss": 0.7254,
"step": 1692
},
{
"epoch": 2.4,
"grad_norm": 0.8882796764373779,
"learning_rate": 7.998580553584103e-05,
"loss": 0.7425,
"step": 1693
},
{
"epoch": 2.4,
"grad_norm": 0.9046310782432556,
"learning_rate": 7.991483321504613e-05,
"loss": 0.7593,
"step": 1694
},
{
"epoch": 2.4,
"grad_norm": 0.9165465831756592,
"learning_rate": 7.984386089425125e-05,
"loss": 0.7138,
"step": 1695
},
{
"epoch": 2.4,
"grad_norm": 0.8907314538955688,
"learning_rate": 7.977288857345636e-05,
"loss": 0.734,
"step": 1696
},
{
"epoch": 2.4,
"grad_norm": 0.9012109041213989,
"learning_rate": 7.970191625266147e-05,
"loss": 0.7136,
"step": 1697
},
{
"epoch": 2.41,
"grad_norm": 0.8788906335830688,
"learning_rate": 7.963094393186657e-05,
"loss": 0.7005,
"step": 1698
},
{
"epoch": 2.41,
"grad_norm": 0.9376303553581238,
"learning_rate": 7.955997161107168e-05,
"loss": 0.7704,
"step": 1699
},
{
"epoch": 2.41,
"grad_norm": 0.8864246606826782,
"learning_rate": 7.94889992902768e-05,
"loss": 0.723,
"step": 1700
},
{
"epoch": 2.41,
"grad_norm": 0.9175934791564941,
"learning_rate": 7.941802696948191e-05,
"loss": 0.744,
"step": 1701
},
{
"epoch": 2.41,
"grad_norm": 0.8643361926078796,
"learning_rate": 7.934705464868701e-05,
"loss": 0.7499,
"step": 1702
},
{
"epoch": 2.41,
"grad_norm": 0.901351809501648,
"learning_rate": 7.927608232789212e-05,
"loss": 0.7667,
"step": 1703
},
{
"epoch": 2.41,
"grad_norm": 0.8338555693626404,
"learning_rate": 7.920511000709723e-05,
"loss": 0.6274,
"step": 1704
},
{
"epoch": 2.42,
"grad_norm": 0.8955431580543518,
"learning_rate": 7.913413768630235e-05,
"loss": 0.7669,
"step": 1705
},
{
"epoch": 2.42,
"grad_norm": 0.8964597582817078,
"learning_rate": 7.906316536550745e-05,
"loss": 0.699,
"step": 1706
},
{
"epoch": 2.42,
"grad_norm": 0.8782421350479126,
"learning_rate": 7.899219304471257e-05,
"loss": 0.7406,
"step": 1707
},
{
"epoch": 2.42,
"grad_norm": 0.9423942565917969,
"learning_rate": 7.892122072391767e-05,
"loss": 0.7557,
"step": 1708
},
{
"epoch": 2.42,
"grad_norm": 0.8742157816886902,
"learning_rate": 7.885024840312278e-05,
"loss": 0.6471,
"step": 1709
},
{
"epoch": 2.42,
"grad_norm": 0.9231082201004028,
"learning_rate": 7.87792760823279e-05,
"loss": 0.7049,
"step": 1710
},
{
"epoch": 2.42,
"grad_norm": 0.9250949025154114,
"learning_rate": 7.870830376153301e-05,
"loss": 0.7393,
"step": 1711
},
{
"epoch": 2.43,
"grad_norm": 0.8757654428482056,
"learning_rate": 7.863733144073812e-05,
"loss": 0.7057,
"step": 1712
},
{
"epoch": 2.43,
"grad_norm": 0.8953589200973511,
"learning_rate": 7.856635911994322e-05,
"loss": 0.7286,
"step": 1713
},
{
"epoch": 2.43,
"grad_norm": 0.9081451296806335,
"learning_rate": 7.849538679914833e-05,
"loss": 0.7699,
"step": 1714
},
{
"epoch": 2.43,
"grad_norm": 0.8773189783096313,
"learning_rate": 7.842441447835345e-05,
"loss": 0.763,
"step": 1715
},
{
"epoch": 2.43,
"grad_norm": 0.8770549297332764,
"learning_rate": 7.835344215755856e-05,
"loss": 0.7245,
"step": 1716
},
{
"epoch": 2.43,
"grad_norm": 0.9066706895828247,
"learning_rate": 7.828246983676366e-05,
"loss": 0.7746,
"step": 1717
},
{
"epoch": 2.43,
"grad_norm": 0.901549756526947,
"learning_rate": 7.821149751596878e-05,
"loss": 0.7526,
"step": 1718
},
{
"epoch": 2.44,
"grad_norm": 0.894270122051239,
"learning_rate": 7.814052519517388e-05,
"loss": 0.7267,
"step": 1719
},
{
"epoch": 2.44,
"grad_norm": 0.9308816194534302,
"learning_rate": 7.8069552874379e-05,
"loss": 0.7116,
"step": 1720
},
{
"epoch": 2.44,
"grad_norm": 0.8835683465003967,
"learning_rate": 7.799858055358411e-05,
"loss": 0.726,
"step": 1721
},
{
"epoch": 2.44,
"grad_norm": 0.8589925169944763,
"learning_rate": 7.792760823278922e-05,
"loss": 0.7438,
"step": 1722
},
{
"epoch": 2.44,
"grad_norm": 0.910952091217041,
"learning_rate": 7.785663591199432e-05,
"loss": 0.7553,
"step": 1723
},
{
"epoch": 2.44,
"grad_norm": 0.8601446747779846,
"learning_rate": 7.778566359119943e-05,
"loss": 0.7107,
"step": 1724
},
{
"epoch": 2.44,
"grad_norm": 0.8741260170936584,
"learning_rate": 7.771469127040455e-05,
"loss": 0.7428,
"step": 1725
},
{
"epoch": 2.45,
"grad_norm": 0.9290738701820374,
"learning_rate": 7.764371894960966e-05,
"loss": 0.7675,
"step": 1726
},
{
"epoch": 2.45,
"grad_norm": 0.8710927963256836,
"learning_rate": 7.757274662881476e-05,
"loss": 0.6856,
"step": 1727
},
{
"epoch": 2.45,
"grad_norm": 0.9226647615432739,
"learning_rate": 7.750177430801989e-05,
"loss": 0.8229,
"step": 1728
},
{
"epoch": 2.45,
"grad_norm": 0.8699370622634888,
"learning_rate": 7.743080198722498e-05,
"loss": 0.7268,
"step": 1729
},
{
"epoch": 2.45,
"grad_norm": 0.8959556221961975,
"learning_rate": 7.73598296664301e-05,
"loss": 0.7698,
"step": 1730
},
{
"epoch": 2.45,
"grad_norm": 0.8745953440666199,
"learning_rate": 7.72888573456352e-05,
"loss": 0.7181,
"step": 1731
},
{
"epoch": 2.45,
"grad_norm": 0.8749833703041077,
"learning_rate": 7.721788502484032e-05,
"loss": 0.792,
"step": 1732
},
{
"epoch": 2.46,
"grad_norm": 0.9342421889305115,
"learning_rate": 7.714691270404542e-05,
"loss": 0.7907,
"step": 1733
},
{
"epoch": 2.46,
"grad_norm": 0.8828331232070923,
"learning_rate": 7.707594038325053e-05,
"loss": 0.708,
"step": 1734
},
{
"epoch": 2.46,
"grad_norm": 0.8733470439910889,
"learning_rate": 7.700496806245565e-05,
"loss": 0.7326,
"step": 1735
},
{
"epoch": 2.46,
"grad_norm": 0.9026939272880554,
"learning_rate": 7.693399574166076e-05,
"loss": 0.7149,
"step": 1736
},
{
"epoch": 2.46,
"grad_norm": 0.870780348777771,
"learning_rate": 7.686302342086586e-05,
"loss": 0.745,
"step": 1737
},
{
"epoch": 2.46,
"grad_norm": 0.8790858387947083,
"learning_rate": 7.679205110007097e-05,
"loss": 0.7532,
"step": 1738
},
{
"epoch": 2.46,
"grad_norm": 0.8772610425949097,
"learning_rate": 7.672107877927608e-05,
"loss": 0.6456,
"step": 1739
},
{
"epoch": 2.47,
"grad_norm": 0.9049152731895447,
"learning_rate": 7.66501064584812e-05,
"loss": 0.7901,
"step": 1740
},
{
"epoch": 2.47,
"grad_norm": 0.8748606443405151,
"learning_rate": 7.657913413768631e-05,
"loss": 0.7347,
"step": 1741
},
{
"epoch": 2.47,
"grad_norm": 0.8891382813453674,
"learning_rate": 7.650816181689141e-05,
"loss": 0.6857,
"step": 1742
},
{
"epoch": 2.47,
"grad_norm": 0.9277533292770386,
"learning_rate": 7.643718949609652e-05,
"loss": 0.7585,
"step": 1743
},
{
"epoch": 2.47,
"grad_norm": 0.8965739011764526,
"learning_rate": 7.636621717530163e-05,
"loss": 0.7177,
"step": 1744
},
{
"epoch": 2.47,
"grad_norm": 0.8912781476974487,
"learning_rate": 7.629524485450675e-05,
"loss": 0.7499,
"step": 1745
},
{
"epoch": 2.47,
"grad_norm": 0.925321102142334,
"learning_rate": 7.622427253371186e-05,
"loss": 0.7524,
"step": 1746
},
{
"epoch": 2.47,
"grad_norm": 0.888886570930481,
"learning_rate": 7.615330021291697e-05,
"loss": 0.7251,
"step": 1747
},
{
"epoch": 2.48,
"grad_norm": 0.8739018440246582,
"learning_rate": 7.608232789212207e-05,
"loss": 0.7421,
"step": 1748
},
{
"epoch": 2.48,
"grad_norm": 0.8653773665428162,
"learning_rate": 7.601135557132718e-05,
"loss": 0.7121,
"step": 1749
},
{
"epoch": 2.48,
"grad_norm": 0.9399885535240173,
"learning_rate": 7.59403832505323e-05,
"loss": 0.8104,
"step": 1750
},
{
"epoch": 2.48,
"grad_norm": 0.8935320377349854,
"learning_rate": 7.586941092973741e-05,
"loss": 0.7285,
"step": 1751
},
{
"epoch": 2.48,
"grad_norm": 0.8893241286277771,
"learning_rate": 7.579843860894251e-05,
"loss": 0.711,
"step": 1752
},
{
"epoch": 2.48,
"grad_norm": 0.8903502225875854,
"learning_rate": 7.572746628814764e-05,
"loss": 0.74,
"step": 1753
},
{
"epoch": 2.48,
"grad_norm": 0.8928572535514832,
"learning_rate": 7.565649396735273e-05,
"loss": 0.7333,
"step": 1754
},
{
"epoch": 2.49,
"grad_norm": 0.8857137560844421,
"learning_rate": 7.558552164655785e-05,
"loss": 0.7382,
"step": 1755
},
{
"epoch": 2.49,
"grad_norm": 0.9179412722587585,
"learning_rate": 7.551454932576295e-05,
"loss": 0.7226,
"step": 1756
},
{
"epoch": 2.49,
"grad_norm": 0.9129919409751892,
"learning_rate": 7.544357700496807e-05,
"loss": 0.7687,
"step": 1757
},
{
"epoch": 2.49,
"grad_norm": 0.9090712666511536,
"learning_rate": 7.537260468417317e-05,
"loss": 0.7881,
"step": 1758
},
{
"epoch": 2.49,
"grad_norm": 0.9000692963600159,
"learning_rate": 7.530163236337828e-05,
"loss": 0.7255,
"step": 1759
},
{
"epoch": 2.49,
"grad_norm": 0.8983575701713562,
"learning_rate": 7.52306600425834e-05,
"loss": 0.7276,
"step": 1760
},
{
"epoch": 2.49,
"grad_norm": 0.9295850396156311,
"learning_rate": 7.515968772178851e-05,
"loss": 0.7576,
"step": 1761
},
{
"epoch": 2.5,
"grad_norm": 0.8923923373222351,
"learning_rate": 7.508871540099361e-05,
"loss": 0.7553,
"step": 1762
},
{
"epoch": 2.5,
"grad_norm": 0.9035564064979553,
"learning_rate": 7.501774308019872e-05,
"loss": 0.757,
"step": 1763
},
{
"epoch": 2.5,
"grad_norm": 0.8981541991233826,
"learning_rate": 7.494677075940383e-05,
"loss": 0.7188,
"step": 1764
},
{
"epoch": 2.5,
"grad_norm": 0.9119907021522522,
"learning_rate": 7.487579843860895e-05,
"loss": 0.73,
"step": 1765
},
{
"epoch": 2.5,
"grad_norm": 0.9041171669960022,
"learning_rate": 7.480482611781405e-05,
"loss": 0.7081,
"step": 1766
},
{
"epoch": 2.5,
"grad_norm": 0.8893494606018066,
"learning_rate": 7.473385379701917e-05,
"loss": 0.7115,
"step": 1767
},
{
"epoch": 2.5,
"grad_norm": 0.9286924004554749,
"learning_rate": 7.466288147622427e-05,
"loss": 0.7761,
"step": 1768
},
{
"epoch": 2.51,
"grad_norm": 0.9019243121147156,
"learning_rate": 7.459190915542938e-05,
"loss": 0.6966,
"step": 1769
},
{
"epoch": 2.51,
"grad_norm": 0.88730788230896,
"learning_rate": 7.45209368346345e-05,
"loss": 0.6791,
"step": 1770
},
{
"epoch": 2.51,
"grad_norm": 0.9363870620727539,
"learning_rate": 7.444996451383961e-05,
"loss": 0.7713,
"step": 1771
},
{
"epoch": 2.51,
"grad_norm": 0.8667078614234924,
"learning_rate": 7.437899219304472e-05,
"loss": 0.7231,
"step": 1772
},
{
"epoch": 2.51,
"grad_norm": 0.91669100522995,
"learning_rate": 7.430801987224982e-05,
"loss": 0.7306,
"step": 1773
},
{
"epoch": 2.51,
"grad_norm": 0.9173486828804016,
"learning_rate": 7.423704755145493e-05,
"loss": 0.714,
"step": 1774
},
{
"epoch": 2.51,
"grad_norm": 0.931074321269989,
"learning_rate": 7.416607523066005e-05,
"loss": 0.7594,
"step": 1775
},
{
"epoch": 2.52,
"grad_norm": 0.8770849704742432,
"learning_rate": 7.409510290986516e-05,
"loss": 0.7178,
"step": 1776
},
{
"epoch": 2.52,
"grad_norm": 0.9106402397155762,
"learning_rate": 7.402413058907026e-05,
"loss": 0.732,
"step": 1777
},
{
"epoch": 2.52,
"grad_norm": 0.9002892374992371,
"learning_rate": 7.395315826827539e-05,
"loss": 0.7282,
"step": 1778
},
{
"epoch": 2.52,
"grad_norm": 0.9161092638969421,
"learning_rate": 7.388218594748048e-05,
"loss": 0.7786,
"step": 1779
},
{
"epoch": 2.52,
"grad_norm": 0.9152305126190186,
"learning_rate": 7.38112136266856e-05,
"loss": 0.7226,
"step": 1780
},
{
"epoch": 2.52,
"grad_norm": 0.8772377371788025,
"learning_rate": 7.374024130589071e-05,
"loss": 0.7233,
"step": 1781
},
{
"epoch": 2.52,
"grad_norm": 0.9021669030189514,
"learning_rate": 7.366926898509582e-05,
"loss": 0.7336,
"step": 1782
},
{
"epoch": 2.53,
"grad_norm": 0.8629419207572937,
"learning_rate": 7.359829666430092e-05,
"loss": 0.7086,
"step": 1783
},
{
"epoch": 2.53,
"grad_norm": 0.8985278010368347,
"learning_rate": 7.352732434350604e-05,
"loss": 0.7495,
"step": 1784
},
{
"epoch": 2.53,
"grad_norm": 0.8771053552627563,
"learning_rate": 7.345635202271115e-05,
"loss": 0.7,
"step": 1785
},
{
"epoch": 2.53,
"grad_norm": 0.8981669545173645,
"learning_rate": 7.338537970191626e-05,
"loss": 0.6897,
"step": 1786
},
{
"epoch": 2.53,
"grad_norm": 0.9112007021903992,
"learning_rate": 7.331440738112136e-05,
"loss": 0.7408,
"step": 1787
},
{
"epoch": 2.53,
"grad_norm": 0.9054397940635681,
"learning_rate": 7.324343506032647e-05,
"loss": 0.7054,
"step": 1788
},
{
"epoch": 2.53,
"grad_norm": 0.869867205619812,
"learning_rate": 7.317246273953159e-05,
"loss": 0.6891,
"step": 1789
},
{
"epoch": 2.54,
"grad_norm": 0.90545654296875,
"learning_rate": 7.31014904187367e-05,
"loss": 0.7293,
"step": 1790
},
{
"epoch": 2.54,
"grad_norm": 0.9081699848175049,
"learning_rate": 7.30305180979418e-05,
"loss": 0.7445,
"step": 1791
},
{
"epoch": 2.54,
"grad_norm": 0.9117109775543213,
"learning_rate": 7.295954577714692e-05,
"loss": 0.7802,
"step": 1792
},
{
"epoch": 2.54,
"grad_norm": 0.9705706834793091,
"learning_rate": 7.288857345635202e-05,
"loss": 0.7802,
"step": 1793
},
{
"epoch": 2.54,
"grad_norm": 0.9153757095336914,
"learning_rate": 7.281760113555714e-05,
"loss": 0.7444,
"step": 1794
},
{
"epoch": 2.54,
"grad_norm": 0.8785500526428223,
"learning_rate": 7.274662881476223e-05,
"loss": 0.6899,
"step": 1795
},
{
"epoch": 2.54,
"grad_norm": 0.9095218777656555,
"learning_rate": 7.267565649396736e-05,
"loss": 0.7234,
"step": 1796
},
{
"epoch": 2.55,
"grad_norm": 0.8905136585235596,
"learning_rate": 7.260468417317246e-05,
"loss": 0.7169,
"step": 1797
},
{
"epoch": 2.55,
"grad_norm": 0.9118934869766235,
"learning_rate": 7.253371185237757e-05,
"loss": 0.7301,
"step": 1798
},
{
"epoch": 2.55,
"grad_norm": 0.9225103259086609,
"learning_rate": 7.246273953158269e-05,
"loss": 0.798,
"step": 1799
},
{
"epoch": 2.55,
"grad_norm": 0.8873489499092102,
"learning_rate": 7.23917672107878e-05,
"loss": 0.7378,
"step": 1800
},
{
"epoch": 2.55,
"grad_norm": 0.8982245922088623,
"learning_rate": 7.232079488999291e-05,
"loss": 0.7738,
"step": 1801
},
{
"epoch": 2.55,
"grad_norm": 0.8934197425842285,
"learning_rate": 7.224982256919801e-05,
"loss": 0.6641,
"step": 1802
},
{
"epoch": 2.55,
"grad_norm": 0.9242858290672302,
"learning_rate": 7.217885024840312e-05,
"loss": 0.7361,
"step": 1803
},
{
"epoch": 2.56,
"grad_norm": 0.8764644861221313,
"learning_rate": 7.210787792760824e-05,
"loss": 0.7311,
"step": 1804
},
{
"epoch": 2.56,
"grad_norm": 0.8844506144523621,
"learning_rate": 7.203690560681335e-05,
"loss": 0.7216,
"step": 1805
},
{
"epoch": 2.56,
"grad_norm": 0.8994446396827698,
"learning_rate": 7.196593328601846e-05,
"loss": 0.7009,
"step": 1806
},
{
"epoch": 2.56,
"grad_norm": 0.9672061204910278,
"learning_rate": 7.189496096522357e-05,
"loss": 0.8095,
"step": 1807
},
{
"epoch": 2.56,
"grad_norm": 0.929573655128479,
"learning_rate": 7.182398864442867e-05,
"loss": 0.7629,
"step": 1808
},
{
"epoch": 2.56,
"grad_norm": 0.8954296708106995,
"learning_rate": 7.175301632363379e-05,
"loss": 0.7393,
"step": 1809
},
{
"epoch": 2.56,
"grad_norm": 0.9068436026573181,
"learning_rate": 7.16820440028389e-05,
"loss": 0.7749,
"step": 1810
},
{
"epoch": 2.57,
"grad_norm": 0.8931055665016174,
"learning_rate": 7.161107168204401e-05,
"loss": 0.7452,
"step": 1811
},
{
"epoch": 2.57,
"grad_norm": 0.9035736918449402,
"learning_rate": 7.154009936124911e-05,
"loss": 0.7888,
"step": 1812
},
{
"epoch": 2.57,
"grad_norm": 0.9025890231132507,
"learning_rate": 7.146912704045424e-05,
"loss": 0.7025,
"step": 1813
},
{
"epoch": 2.57,
"grad_norm": 0.8863286972045898,
"learning_rate": 7.139815471965934e-05,
"loss": 0.7307,
"step": 1814
},
{
"epoch": 2.57,
"grad_norm": 0.8847543597221375,
"learning_rate": 7.132718239886445e-05,
"loss": 0.7458,
"step": 1815
},
{
"epoch": 2.57,
"grad_norm": 0.8659204840660095,
"learning_rate": 7.125621007806955e-05,
"loss": 0.753,
"step": 1816
},
{
"epoch": 2.57,
"grad_norm": 0.9039578437805176,
"learning_rate": 7.118523775727467e-05,
"loss": 0.7161,
"step": 1817
},
{
"epoch": 2.58,
"grad_norm": 0.935804545879364,
"learning_rate": 7.111426543647977e-05,
"loss": 0.7696,
"step": 1818
},
{
"epoch": 2.58,
"grad_norm": 0.8946291208267212,
"learning_rate": 7.104329311568489e-05,
"loss": 0.743,
"step": 1819
},
{
"epoch": 2.58,
"grad_norm": 0.9393401145935059,
"learning_rate": 7.097232079489e-05,
"loss": 0.737,
"step": 1820
},
{
"epoch": 2.58,
"grad_norm": 0.8611619472503662,
"learning_rate": 7.090134847409511e-05,
"loss": 0.6716,
"step": 1821
},
{
"epoch": 2.58,
"grad_norm": 0.8942080140113831,
"learning_rate": 7.083037615330021e-05,
"loss": 0.7581,
"step": 1822
},
{
"epoch": 2.58,
"grad_norm": 0.9117892980575562,
"learning_rate": 7.075940383250532e-05,
"loss": 0.7086,
"step": 1823
},
{
"epoch": 2.58,
"grad_norm": 0.8960133790969849,
"learning_rate": 7.068843151171044e-05,
"loss": 0.7807,
"step": 1824
},
{
"epoch": 2.59,
"grad_norm": 0.9127882719039917,
"learning_rate": 7.061745919091555e-05,
"loss": 0.7258,
"step": 1825
},
{
"epoch": 2.59,
"grad_norm": 0.9065718650817871,
"learning_rate": 7.054648687012065e-05,
"loss": 0.7771,
"step": 1826
},
{
"epoch": 2.59,
"grad_norm": 0.9114099144935608,
"learning_rate": 7.047551454932577e-05,
"loss": 0.7564,
"step": 1827
},
{
"epoch": 2.59,
"grad_norm": 0.8710154294967651,
"learning_rate": 7.040454222853087e-05,
"loss": 0.691,
"step": 1828
},
{
"epoch": 2.59,
"grad_norm": 0.8883103132247925,
"learning_rate": 7.033356990773599e-05,
"loss": 0.797,
"step": 1829
},
{
"epoch": 2.59,
"grad_norm": 0.8676865100860596,
"learning_rate": 7.02625975869411e-05,
"loss": 0.707,
"step": 1830
},
{
"epoch": 2.59,
"grad_norm": 0.8942274451255798,
"learning_rate": 7.019162526614621e-05,
"loss": 0.7155,
"step": 1831
},
{
"epoch": 2.6,
"grad_norm": 0.9445174336433411,
"learning_rate": 7.012065294535132e-05,
"loss": 0.7967,
"step": 1832
},
{
"epoch": 2.6,
"grad_norm": 0.9001713991165161,
"learning_rate": 7.004968062455642e-05,
"loss": 0.7332,
"step": 1833
},
{
"epoch": 2.6,
"grad_norm": 0.9131384491920471,
"learning_rate": 6.997870830376154e-05,
"loss": 0.719,
"step": 1834
},
{
"epoch": 2.6,
"grad_norm": 0.9131073355674744,
"learning_rate": 6.990773598296665e-05,
"loss": 0.7155,
"step": 1835
},
{
"epoch": 2.6,
"grad_norm": 0.9039396643638611,
"learning_rate": 6.983676366217176e-05,
"loss": 0.7428,
"step": 1836
},
{
"epoch": 2.6,
"grad_norm": 0.9080092906951904,
"learning_rate": 6.976579134137686e-05,
"loss": 0.7161,
"step": 1837
},
{
"epoch": 2.6,
"grad_norm": 0.8788297772407532,
"learning_rate": 6.969481902058199e-05,
"loss": 0.7092,
"step": 1838
},
{
"epoch": 2.61,
"grad_norm": 0.8493945598602295,
"learning_rate": 6.962384669978709e-05,
"loss": 0.6627,
"step": 1839
},
{
"epoch": 2.61,
"grad_norm": 0.9463279843330383,
"learning_rate": 6.95528743789922e-05,
"loss": 0.7828,
"step": 1840
},
{
"epoch": 2.61,
"grad_norm": 0.9215744733810425,
"learning_rate": 6.94819020581973e-05,
"loss": 0.7036,
"step": 1841
},
{
"epoch": 2.61,
"grad_norm": 0.882713258266449,
"learning_rate": 6.941092973740242e-05,
"loss": 0.7174,
"step": 1842
},
{
"epoch": 2.61,
"grad_norm": 0.8597182631492615,
"learning_rate": 6.933995741660752e-05,
"loss": 0.7197,
"step": 1843
},
{
"epoch": 2.61,
"grad_norm": 0.8798161745071411,
"learning_rate": 6.926898509581264e-05,
"loss": 0.7363,
"step": 1844
},
{
"epoch": 2.61,
"grad_norm": 0.8880274891853333,
"learning_rate": 6.919801277501775e-05,
"loss": 0.7668,
"step": 1845
},
{
"epoch": 2.62,
"grad_norm": 0.9019714593887329,
"learning_rate": 6.912704045422286e-05,
"loss": 0.7559,
"step": 1846
},
{
"epoch": 2.62,
"grad_norm": 0.9086313843727112,
"learning_rate": 6.905606813342796e-05,
"loss": 0.747,
"step": 1847
},
{
"epoch": 2.62,
"grad_norm": 0.9187281131744385,
"learning_rate": 6.898509581263307e-05,
"loss": 0.7866,
"step": 1848
},
{
"epoch": 2.62,
"grad_norm": 0.9017612338066101,
"learning_rate": 6.891412349183819e-05,
"loss": 0.7581,
"step": 1849
},
{
"epoch": 2.62,
"grad_norm": 0.9309147000312805,
"learning_rate": 6.88431511710433e-05,
"loss": 0.7512,
"step": 1850
},
{
"epoch": 2.62,
"grad_norm": 0.8845265507698059,
"learning_rate": 6.87721788502484e-05,
"loss": 0.7758,
"step": 1851
},
{
"epoch": 2.62,
"grad_norm": 0.8595403432846069,
"learning_rate": 6.870120652945352e-05,
"loss": 0.665,
"step": 1852
},
{
"epoch": 2.63,
"grad_norm": 0.9455280303955078,
"learning_rate": 6.863023420865862e-05,
"loss": 0.7181,
"step": 1853
},
{
"epoch": 2.63,
"grad_norm": 0.8818469047546387,
"learning_rate": 6.855926188786374e-05,
"loss": 0.6943,
"step": 1854
},
{
"epoch": 2.63,
"grad_norm": 0.8975241780281067,
"learning_rate": 6.848828956706884e-05,
"loss": 0.7303,
"step": 1855
},
{
"epoch": 2.63,
"grad_norm": 0.8924829363822937,
"learning_rate": 6.841731724627396e-05,
"loss": 0.7318,
"step": 1856
},
{
"epoch": 2.63,
"grad_norm": 0.9156109690666199,
"learning_rate": 6.834634492547906e-05,
"loss": 0.7303,
"step": 1857
},
{
"epoch": 2.63,
"grad_norm": 0.8979274034500122,
"learning_rate": 6.827537260468417e-05,
"loss": 0.7247,
"step": 1858
},
{
"epoch": 2.63,
"grad_norm": 0.9190047979354858,
"learning_rate": 6.820440028388929e-05,
"loss": 0.7229,
"step": 1859
},
{
"epoch": 2.64,
"grad_norm": 0.8945170640945435,
"learning_rate": 6.81334279630944e-05,
"loss": 0.7494,
"step": 1860
},
{
"epoch": 2.64,
"grad_norm": 0.9074508547782898,
"learning_rate": 6.806245564229951e-05,
"loss": 0.7074,
"step": 1861
},
{
"epoch": 2.64,
"grad_norm": 0.884390115737915,
"learning_rate": 6.799148332150461e-05,
"loss": 0.6879,
"step": 1862
},
{
"epoch": 2.64,
"grad_norm": 0.8993882536888123,
"learning_rate": 6.792051100070972e-05,
"loss": 0.7678,
"step": 1863
},
{
"epoch": 2.64,
"grad_norm": 0.9103842377662659,
"learning_rate": 6.784953867991484e-05,
"loss": 0.6994,
"step": 1864
},
{
"epoch": 2.64,
"grad_norm": 0.8733007311820984,
"learning_rate": 6.777856635911995e-05,
"loss": 0.6775,
"step": 1865
},
{
"epoch": 2.64,
"grad_norm": 0.9129227995872498,
"learning_rate": 6.770759403832506e-05,
"loss": 0.7798,
"step": 1866
},
{
"epoch": 2.64,
"grad_norm": 0.8990234732627869,
"learning_rate": 6.763662171753017e-05,
"loss": 0.7268,
"step": 1867
},
{
"epoch": 2.65,
"grad_norm": 0.92862868309021,
"learning_rate": 6.756564939673527e-05,
"loss": 0.7577,
"step": 1868
},
{
"epoch": 2.65,
"grad_norm": 0.901336133480072,
"learning_rate": 6.749467707594039e-05,
"loss": 0.7165,
"step": 1869
},
{
"epoch": 2.65,
"grad_norm": 0.8477864861488342,
"learning_rate": 6.74237047551455e-05,
"loss": 0.6478,
"step": 1870
},
{
"epoch": 2.65,
"grad_norm": 0.8649226427078247,
"learning_rate": 6.735273243435061e-05,
"loss": 0.7218,
"step": 1871
},
{
"epoch": 2.65,
"grad_norm": 0.895254373550415,
"learning_rate": 6.728176011355571e-05,
"loss": 0.7311,
"step": 1872
},
{
"epoch": 2.65,
"grad_norm": 0.8805977702140808,
"learning_rate": 6.721078779276084e-05,
"loss": 0.6869,
"step": 1873
},
{
"epoch": 2.65,
"grad_norm": 0.8643150329589844,
"learning_rate": 6.713981547196594e-05,
"loss": 0.6596,
"step": 1874
},
{
"epoch": 2.66,
"grad_norm": 0.9333701133728027,
"learning_rate": 6.706884315117105e-05,
"loss": 0.732,
"step": 1875
},
{
"epoch": 2.66,
"grad_norm": 0.9489626884460449,
"learning_rate": 6.699787083037615e-05,
"loss": 0.7647,
"step": 1876
},
{
"epoch": 2.66,
"grad_norm": 0.8754850029945374,
"learning_rate": 6.692689850958127e-05,
"loss": 0.7136,
"step": 1877
},
{
"epoch": 2.66,
"grad_norm": 0.95391845703125,
"learning_rate": 6.685592618878637e-05,
"loss": 0.7988,
"step": 1878
},
{
"epoch": 2.66,
"grad_norm": 0.9592831134796143,
"learning_rate": 6.678495386799149e-05,
"loss": 0.7275,
"step": 1879
},
{
"epoch": 2.66,
"grad_norm": 0.9160040616989136,
"learning_rate": 6.671398154719659e-05,
"loss": 0.7331,
"step": 1880
},
{
"epoch": 2.66,
"grad_norm": 0.9226792454719543,
"learning_rate": 6.664300922640171e-05,
"loss": 0.7482,
"step": 1881
},
{
"epoch": 2.67,
"grad_norm": 0.9255938529968262,
"learning_rate": 6.657203690560681e-05,
"loss": 0.7231,
"step": 1882
},
{
"epoch": 2.67,
"grad_norm": 0.8794947862625122,
"learning_rate": 6.650106458481192e-05,
"loss": 0.6833,
"step": 1883
},
{
"epoch": 2.67,
"grad_norm": 0.9187234044075012,
"learning_rate": 6.643009226401704e-05,
"loss": 0.7916,
"step": 1884
},
{
"epoch": 2.67,
"grad_norm": 0.933605432510376,
"learning_rate": 6.635911994322215e-05,
"loss": 0.7747,
"step": 1885
},
{
"epoch": 2.67,
"grad_norm": 0.904399037361145,
"learning_rate": 6.628814762242725e-05,
"loss": 0.7911,
"step": 1886
},
{
"epoch": 2.67,
"grad_norm": 0.8907902240753174,
"learning_rate": 6.621717530163236e-05,
"loss": 0.7408,
"step": 1887
},
{
"epoch": 2.67,
"grad_norm": 0.865777313709259,
"learning_rate": 6.614620298083747e-05,
"loss": 0.6625,
"step": 1888
},
{
"epoch": 2.68,
"grad_norm": 0.9031429886817932,
"learning_rate": 6.607523066004259e-05,
"loss": 0.7341,
"step": 1889
},
{
"epoch": 2.68,
"grad_norm": 0.9169644713401794,
"learning_rate": 6.60042583392477e-05,
"loss": 0.7321,
"step": 1890
},
{
"epoch": 2.68,
"grad_norm": 0.9195284247398376,
"learning_rate": 6.593328601845281e-05,
"loss": 0.7736,
"step": 1891
},
{
"epoch": 2.68,
"grad_norm": 0.9451195001602173,
"learning_rate": 6.586231369765791e-05,
"loss": 0.8059,
"step": 1892
},
{
"epoch": 2.68,
"grad_norm": 0.8922215700149536,
"learning_rate": 6.579134137686302e-05,
"loss": 0.6853,
"step": 1893
},
{
"epoch": 2.68,
"grad_norm": 0.9502759575843811,
"learning_rate": 6.572036905606814e-05,
"loss": 0.719,
"step": 1894
},
{
"epoch": 2.68,
"grad_norm": 0.9210609793663025,
"learning_rate": 6.564939673527325e-05,
"loss": 0.726,
"step": 1895
},
{
"epoch": 2.69,
"grad_norm": 0.9551956653594971,
"learning_rate": 6.557842441447836e-05,
"loss": 0.8149,
"step": 1896
},
{
"epoch": 2.69,
"grad_norm": 0.8991343379020691,
"learning_rate": 6.550745209368346e-05,
"loss": 0.7282,
"step": 1897
},
{
"epoch": 2.69,
"grad_norm": 0.8829290866851807,
"learning_rate": 6.543647977288859e-05,
"loss": 0.7694,
"step": 1898
},
{
"epoch": 2.69,
"grad_norm": 0.9269784092903137,
"learning_rate": 6.536550745209369e-05,
"loss": 0.7331,
"step": 1899
},
{
"epoch": 2.69,
"grad_norm": 0.8731749057769775,
"learning_rate": 6.52945351312988e-05,
"loss": 0.6979,
"step": 1900
},
{
"epoch": 2.69,
"grad_norm": 0.8528725504875183,
"learning_rate": 6.52235628105039e-05,
"loss": 0.7293,
"step": 1901
},
{
"epoch": 2.69,
"grad_norm": 0.8936817049980164,
"learning_rate": 6.515259048970902e-05,
"loss": 0.7219,
"step": 1902
},
{
"epoch": 2.7,
"grad_norm": 0.8615530133247375,
"learning_rate": 6.508161816891412e-05,
"loss": 0.7127,
"step": 1903
},
{
"epoch": 2.7,
"grad_norm": 0.9263240694999695,
"learning_rate": 6.501064584811924e-05,
"loss": 0.7366,
"step": 1904
},
{
"epoch": 2.7,
"grad_norm": 0.894145667552948,
"learning_rate": 6.493967352732435e-05,
"loss": 0.7331,
"step": 1905
},
{
"epoch": 2.7,
"grad_norm": 0.8919436931610107,
"learning_rate": 6.486870120652946e-05,
"loss": 0.7227,
"step": 1906
},
{
"epoch": 2.7,
"grad_norm": 0.9151416420936584,
"learning_rate": 6.479772888573456e-05,
"loss": 0.749,
"step": 1907
},
{
"epoch": 2.7,
"grad_norm": 0.9217795729637146,
"learning_rate": 6.472675656493967e-05,
"loss": 0.7824,
"step": 1908
},
{
"epoch": 2.7,
"grad_norm": 0.9153746366500854,
"learning_rate": 6.465578424414479e-05,
"loss": 0.7191,
"step": 1909
},
{
"epoch": 2.71,
"grad_norm": 0.8637219667434692,
"learning_rate": 6.45848119233499e-05,
"loss": 0.7216,
"step": 1910
},
{
"epoch": 2.71,
"grad_norm": 0.9067671895027161,
"learning_rate": 6.4513839602555e-05,
"loss": 0.7083,
"step": 1911
},
{
"epoch": 2.71,
"grad_norm": 0.9326914548873901,
"learning_rate": 6.444286728176013e-05,
"loss": 0.7884,
"step": 1912
},
{
"epoch": 2.71,
"grad_norm": 0.8889709711074829,
"learning_rate": 6.437189496096522e-05,
"loss": 0.7329,
"step": 1913
},
{
"epoch": 2.71,
"grad_norm": 0.8617497086524963,
"learning_rate": 6.430092264017034e-05,
"loss": 0.7025,
"step": 1914
},
{
"epoch": 2.71,
"grad_norm": 0.9106520414352417,
"learning_rate": 6.422995031937544e-05,
"loss": 0.7113,
"step": 1915
},
{
"epoch": 2.71,
"grad_norm": 0.9133102893829346,
"learning_rate": 6.415897799858056e-05,
"loss": 0.7495,
"step": 1916
},
{
"epoch": 2.72,
"grad_norm": 0.8778348565101624,
"learning_rate": 6.408800567778566e-05,
"loss": 0.691,
"step": 1917
},
{
"epoch": 2.72,
"grad_norm": 0.8651662468910217,
"learning_rate": 6.401703335699077e-05,
"loss": 0.6919,
"step": 1918
},
{
"epoch": 2.72,
"grad_norm": 0.9208177924156189,
"learning_rate": 6.394606103619589e-05,
"loss": 0.7614,
"step": 1919
},
{
"epoch": 2.72,
"grad_norm": 0.8430038094520569,
"learning_rate": 6.3875088715401e-05,
"loss": 0.6678,
"step": 1920
},
{
"epoch": 2.72,
"grad_norm": 0.8798860907554626,
"learning_rate": 6.380411639460611e-05,
"loss": 0.7001,
"step": 1921
},
{
"epoch": 2.72,
"grad_norm": 0.9283832311630249,
"learning_rate": 6.373314407381121e-05,
"loss": 0.6932,
"step": 1922
},
{
"epoch": 2.72,
"grad_norm": 0.9014415740966797,
"learning_rate": 6.366217175301632e-05,
"loss": 0.7526,
"step": 1923
},
{
"epoch": 2.73,
"grad_norm": 0.9172340035438538,
"learning_rate": 6.359119943222144e-05,
"loss": 0.7518,
"step": 1924
},
{
"epoch": 2.73,
"grad_norm": 0.8508337736129761,
"learning_rate": 6.352022711142655e-05,
"loss": 0.7273,
"step": 1925
},
{
"epoch": 2.73,
"grad_norm": 0.909666895866394,
"learning_rate": 6.344925479063165e-05,
"loss": 0.7415,
"step": 1926
},
{
"epoch": 2.73,
"grad_norm": 0.8876848220825195,
"learning_rate": 6.337828246983678e-05,
"loss": 0.7353,
"step": 1927
},
{
"epoch": 2.73,
"grad_norm": 0.9056611657142639,
"learning_rate": 6.330731014904187e-05,
"loss": 0.7776,
"step": 1928
},
{
"epoch": 2.73,
"grad_norm": 0.9231218099594116,
"learning_rate": 6.323633782824699e-05,
"loss": 0.8055,
"step": 1929
},
{
"epoch": 2.73,
"grad_norm": 0.9067128896713257,
"learning_rate": 6.31653655074521e-05,
"loss": 0.7259,
"step": 1930
},
{
"epoch": 2.74,
"grad_norm": 0.8786343336105347,
"learning_rate": 6.309439318665721e-05,
"loss": 0.7008,
"step": 1931
},
{
"epoch": 2.74,
"grad_norm": 0.9066187143325806,
"learning_rate": 6.302342086586231e-05,
"loss": 0.7225,
"step": 1932
},
{
"epoch": 2.74,
"grad_norm": 0.8969742059707642,
"learning_rate": 6.295244854506742e-05,
"loss": 0.733,
"step": 1933
},
{
"epoch": 2.74,
"grad_norm": 0.9022447466850281,
"learning_rate": 6.288147622427254e-05,
"loss": 0.7418,
"step": 1934
},
{
"epoch": 2.74,
"grad_norm": 0.9096084833145142,
"learning_rate": 6.281050390347765e-05,
"loss": 0.7125,
"step": 1935
},
{
"epoch": 2.74,
"grad_norm": 0.9495100378990173,
"learning_rate": 6.273953158268275e-05,
"loss": 0.7232,
"step": 1936
},
{
"epoch": 2.74,
"grad_norm": 0.9231953024864197,
"learning_rate": 6.266855926188788e-05,
"loss": 0.7536,
"step": 1937
},
{
"epoch": 2.75,
"grad_norm": 0.9400760531425476,
"learning_rate": 6.259758694109297e-05,
"loss": 0.7545,
"step": 1938
},
{
"epoch": 2.75,
"grad_norm": 0.899880051612854,
"learning_rate": 6.252661462029809e-05,
"loss": 0.7119,
"step": 1939
},
{
"epoch": 2.75,
"grad_norm": 0.9516352415084839,
"learning_rate": 6.245564229950319e-05,
"loss": 0.7314,
"step": 1940
},
{
"epoch": 2.75,
"grad_norm": 0.9273751378059387,
"learning_rate": 6.238466997870831e-05,
"loss": 0.7458,
"step": 1941
},
{
"epoch": 2.75,
"grad_norm": 0.8848811984062195,
"learning_rate": 6.231369765791341e-05,
"loss": 0.6936,
"step": 1942
},
{
"epoch": 2.75,
"grad_norm": 0.9142954349517822,
"learning_rate": 6.224272533711852e-05,
"loss": 0.7136,
"step": 1943
},
{
"epoch": 2.75,
"grad_norm": 0.8864548802375793,
"learning_rate": 6.217175301632364e-05,
"loss": 0.7343,
"step": 1944
},
{
"epoch": 2.76,
"grad_norm": 0.8726581931114197,
"learning_rate": 6.210078069552875e-05,
"loss": 0.7012,
"step": 1945
},
{
"epoch": 2.76,
"grad_norm": 0.8997368812561035,
"learning_rate": 6.202980837473385e-05,
"loss": 0.7389,
"step": 1946
},
{
"epoch": 2.76,
"grad_norm": 0.87708979845047,
"learning_rate": 6.195883605393896e-05,
"loss": 0.6718,
"step": 1947
},
{
"epoch": 2.76,
"grad_norm": 0.8872997164726257,
"learning_rate": 6.188786373314407e-05,
"loss": 0.7301,
"step": 1948
},
{
"epoch": 2.76,
"grad_norm": 0.9232304692268372,
"learning_rate": 6.181689141234919e-05,
"loss": 0.7711,
"step": 1949
},
{
"epoch": 2.76,
"grad_norm": 0.8790326714515686,
"learning_rate": 6.17459190915543e-05,
"loss": 0.6659,
"step": 1950
},
{
"epoch": 2.76,
"grad_norm": 0.8775014281272888,
"learning_rate": 6.167494677075941e-05,
"loss": 0.6797,
"step": 1951
},
{
"epoch": 2.77,
"grad_norm": 0.9247701168060303,
"learning_rate": 6.160397444996451e-05,
"loss": 0.7555,
"step": 1952
},
{
"epoch": 2.77,
"grad_norm": 0.9140119552612305,
"learning_rate": 6.153300212916962e-05,
"loss": 0.7253,
"step": 1953
},
{
"epoch": 2.77,
"grad_norm": 0.9283299446105957,
"learning_rate": 6.146202980837474e-05,
"loss": 0.7503,
"step": 1954
},
{
"epoch": 2.77,
"grad_norm": 0.9792236685752869,
"learning_rate": 6.139105748757985e-05,
"loss": 0.8229,
"step": 1955
},
{
"epoch": 2.77,
"grad_norm": 0.9352570176124573,
"learning_rate": 6.132008516678496e-05,
"loss": 0.7465,
"step": 1956
},
{
"epoch": 2.77,
"grad_norm": 0.9299400448799133,
"learning_rate": 6.124911284599006e-05,
"loss": 0.7143,
"step": 1957
},
{
"epoch": 2.77,
"grad_norm": 0.9018230438232422,
"learning_rate": 6.117814052519519e-05,
"loss": 0.7457,
"step": 1958
},
{
"epoch": 2.78,
"grad_norm": 0.9120910167694092,
"learning_rate": 6.110716820440029e-05,
"loss": 0.718,
"step": 1959
},
{
"epoch": 2.78,
"grad_norm": 0.9172790050506592,
"learning_rate": 6.10361958836054e-05,
"loss": 0.7895,
"step": 1960
},
{
"epoch": 2.78,
"grad_norm": 0.9235164523124695,
"learning_rate": 6.09652235628105e-05,
"loss": 0.7568,
"step": 1961
},
{
"epoch": 2.78,
"grad_norm": 0.915084183216095,
"learning_rate": 6.089425124201562e-05,
"loss": 0.7685,
"step": 1962
},
{
"epoch": 2.78,
"grad_norm": 0.8504372835159302,
"learning_rate": 6.0823278921220725e-05,
"loss": 0.7283,
"step": 1963
},
{
"epoch": 2.78,
"grad_norm": 0.8520084619522095,
"learning_rate": 6.075230660042584e-05,
"loss": 0.6907,
"step": 1964
},
{
"epoch": 2.78,
"grad_norm": 0.8992055654525757,
"learning_rate": 6.0681334279630944e-05,
"loss": 0.7292,
"step": 1965
},
{
"epoch": 2.79,
"grad_norm": 0.9049320220947266,
"learning_rate": 6.0610361958836056e-05,
"loss": 0.7567,
"step": 1966
},
{
"epoch": 2.79,
"grad_norm": 0.9057159423828125,
"learning_rate": 6.053938963804117e-05,
"loss": 0.6883,
"step": 1967
},
{
"epoch": 2.79,
"grad_norm": 0.9200868010520935,
"learning_rate": 6.0468417317246275e-05,
"loss": 0.7741,
"step": 1968
},
{
"epoch": 2.79,
"grad_norm": 0.9224002957344055,
"learning_rate": 6.039744499645139e-05,
"loss": 0.7559,
"step": 1969
},
{
"epoch": 2.79,
"grad_norm": 0.9786500930786133,
"learning_rate": 6.03264726756565e-05,
"loss": 0.7951,
"step": 1970
},
{
"epoch": 2.79,
"grad_norm": 0.9218242168426514,
"learning_rate": 6.0255500354861607e-05,
"loss": 0.741,
"step": 1971
},
{
"epoch": 2.79,
"grad_norm": 0.9101900458335876,
"learning_rate": 6.018452803406671e-05,
"loss": 0.7705,
"step": 1972
},
{
"epoch": 2.8,
"grad_norm": 0.8758111596107483,
"learning_rate": 6.011355571327183e-05,
"loss": 0.705,
"step": 1973
},
{
"epoch": 2.8,
"grad_norm": 0.9413437247276306,
"learning_rate": 6.004258339247694e-05,
"loss": 0.705,
"step": 1974
},
{
"epoch": 2.8,
"grad_norm": 0.9429991841316223,
"learning_rate": 5.9971611071682044e-05,
"loss": 0.7461,
"step": 1975
},
{
"epoch": 2.8,
"grad_norm": 0.8979993462562561,
"learning_rate": 5.9900638750887163e-05,
"loss": 0.6894,
"step": 1976
},
{
"epoch": 2.8,
"grad_norm": 0.9537708163261414,
"learning_rate": 5.982966643009227e-05,
"loss": 0.7837,
"step": 1977
},
{
"epoch": 2.8,
"grad_norm": 0.9004853367805481,
"learning_rate": 5.9758694109297375e-05,
"loss": 0.7106,
"step": 1978
},
{
"epoch": 2.8,
"grad_norm": 0.9031293988227844,
"learning_rate": 5.968772178850248e-05,
"loss": 0.7171,
"step": 1979
},
{
"epoch": 2.81,
"grad_norm": 0.8935607671737671,
"learning_rate": 5.96167494677076e-05,
"loss": 0.7401,
"step": 1980
},
{
"epoch": 2.81,
"grad_norm": 0.8977299928665161,
"learning_rate": 5.954577714691271e-05,
"loss": 0.7532,
"step": 1981
},
{
"epoch": 2.81,
"grad_norm": 0.8968639373779297,
"learning_rate": 5.947480482611781e-05,
"loss": 0.7306,
"step": 1982
},
{
"epoch": 2.81,
"grad_norm": 0.9397892951965332,
"learning_rate": 5.940383250532293e-05,
"loss": 0.8076,
"step": 1983
},
{
"epoch": 2.81,
"grad_norm": 0.9595673084259033,
"learning_rate": 5.933286018452804e-05,
"loss": 0.7778,
"step": 1984
},
{
"epoch": 2.81,
"grad_norm": 0.9051293134689331,
"learning_rate": 5.9261887863733144e-05,
"loss": 0.7372,
"step": 1985
},
{
"epoch": 2.81,
"grad_norm": 0.919461190700531,
"learning_rate": 5.919091554293825e-05,
"loss": 0.68,
"step": 1986
},
{
"epoch": 2.81,
"grad_norm": 0.947431743144989,
"learning_rate": 5.911994322214337e-05,
"loss": 0.7676,
"step": 1987
},
{
"epoch": 2.82,
"grad_norm": 0.9109405875205994,
"learning_rate": 5.9048970901348475e-05,
"loss": 0.7342,
"step": 1988
},
{
"epoch": 2.82,
"grad_norm": 0.9157077074050903,
"learning_rate": 5.897799858055358e-05,
"loss": 0.7254,
"step": 1989
},
{
"epoch": 2.82,
"grad_norm": 0.9192777276039124,
"learning_rate": 5.89070262597587e-05,
"loss": 0.816,
"step": 1990
},
{
"epoch": 2.82,
"grad_norm": 0.8862888813018799,
"learning_rate": 5.883605393896381e-05,
"loss": 0.693,
"step": 1991
},
{
"epoch": 2.82,
"grad_norm": 0.8700743317604065,
"learning_rate": 5.876508161816891e-05,
"loss": 0.7199,
"step": 1992
},
{
"epoch": 2.82,
"grad_norm": 0.861030638217926,
"learning_rate": 5.8694109297374026e-05,
"loss": 0.7043,
"step": 1993
},
{
"epoch": 2.82,
"grad_norm": 0.865680456161499,
"learning_rate": 5.862313697657914e-05,
"loss": 0.7142,
"step": 1994
},
{
"epoch": 2.83,
"grad_norm": 0.8904520869255066,
"learning_rate": 5.8552164655784244e-05,
"loss": 0.6742,
"step": 1995
},
{
"epoch": 2.83,
"grad_norm": 0.906788170337677,
"learning_rate": 5.848119233498936e-05,
"loss": 0.745,
"step": 1996
},
{
"epoch": 2.83,
"grad_norm": 0.8844560980796814,
"learning_rate": 5.841022001419447e-05,
"loss": 0.6675,
"step": 1997
},
{
"epoch": 2.83,
"grad_norm": 0.9228169918060303,
"learning_rate": 5.833924769339958e-05,
"loss": 0.7112,
"step": 1998
},
{
"epoch": 2.83,
"grad_norm": 0.9667858481407166,
"learning_rate": 5.826827537260469e-05,
"loss": 0.76,
"step": 1999
},
{
"epoch": 2.83,
"grad_norm": 0.9407635927200317,
"learning_rate": 5.8197303051809794e-05,
"loss": 0.7387,
"step": 2000
},
{
"epoch": 2.83,
"grad_norm": 0.9030634164810181,
"learning_rate": 5.8126330731014914e-05,
"loss": 0.6959,
"step": 2001
},
{
"epoch": 2.84,
"grad_norm": 0.9199838638305664,
"learning_rate": 5.805535841022002e-05,
"loss": 0.7235,
"step": 2002
},
{
"epoch": 2.84,
"grad_norm": 0.9523055553436279,
"learning_rate": 5.7984386089425126e-05,
"loss": 0.7482,
"step": 2003
},
{
"epoch": 2.84,
"grad_norm": 0.8901258707046509,
"learning_rate": 5.7913413768630245e-05,
"loss": 0.7052,
"step": 2004
},
{
"epoch": 2.84,
"grad_norm": 0.919878363609314,
"learning_rate": 5.784244144783535e-05,
"loss": 0.7624,
"step": 2005
},
{
"epoch": 2.84,
"grad_norm": 0.9046053886413574,
"learning_rate": 5.777146912704046e-05,
"loss": 0.6738,
"step": 2006
},
{
"epoch": 2.84,
"grad_norm": 0.9026862978935242,
"learning_rate": 5.770049680624556e-05,
"loss": 0.7391,
"step": 2007
},
{
"epoch": 2.84,
"grad_norm": 0.8612751960754395,
"learning_rate": 5.762952448545068e-05,
"loss": 0.6809,
"step": 2008
},
{
"epoch": 2.85,
"grad_norm": 0.909647524356842,
"learning_rate": 5.755855216465579e-05,
"loss": 0.7777,
"step": 2009
},
{
"epoch": 2.85,
"grad_norm": 0.9045459032058716,
"learning_rate": 5.7487579843860894e-05,
"loss": 0.7579,
"step": 2010
},
{
"epoch": 2.85,
"grad_norm": 0.8565319180488586,
"learning_rate": 5.7416607523066e-05,
"loss": 0.6686,
"step": 2011
},
{
"epoch": 2.85,
"grad_norm": 0.8806788325309753,
"learning_rate": 5.734563520227112e-05,
"loss": 0.7081,
"step": 2012
},
{
"epoch": 2.85,
"grad_norm": 0.9146614074707031,
"learning_rate": 5.7274662881476226e-05,
"loss": 0.7461,
"step": 2013
},
{
"epoch": 2.85,
"grad_norm": 0.9521096348762512,
"learning_rate": 5.720369056068133e-05,
"loss": 0.7745,
"step": 2014
},
{
"epoch": 2.85,
"grad_norm": 0.9208850264549255,
"learning_rate": 5.713271823988645e-05,
"loss": 0.7343,
"step": 2015
},
{
"epoch": 2.86,
"grad_norm": 0.9413381814956665,
"learning_rate": 5.706174591909156e-05,
"loss": 0.744,
"step": 2016
},
{
"epoch": 2.86,
"grad_norm": 0.9284963011741638,
"learning_rate": 5.699077359829666e-05,
"loss": 0.7354,
"step": 2017
},
{
"epoch": 2.86,
"grad_norm": 0.9209182262420654,
"learning_rate": 5.691980127750177e-05,
"loss": 0.7992,
"step": 2018
},
{
"epoch": 2.86,
"grad_norm": 0.8778393268585205,
"learning_rate": 5.684882895670689e-05,
"loss": 0.7211,
"step": 2019
},
{
"epoch": 2.86,
"grad_norm": 0.9171314239501953,
"learning_rate": 5.6777856635911995e-05,
"loss": 0.7308,
"step": 2020
},
{
"epoch": 2.86,
"grad_norm": 0.9178740382194519,
"learning_rate": 5.67068843151171e-05,
"loss": 0.7791,
"step": 2021
},
{
"epoch": 2.86,
"grad_norm": 0.9107670187950134,
"learning_rate": 5.663591199432222e-05,
"loss": 0.7673,
"step": 2022
},
{
"epoch": 2.87,
"grad_norm": 0.8879327178001404,
"learning_rate": 5.6564939673527326e-05,
"loss": 0.7244,
"step": 2023
},
{
"epoch": 2.87,
"grad_norm": 0.8985375165939331,
"learning_rate": 5.649396735273244e-05,
"loss": 0.735,
"step": 2024
},
{
"epoch": 2.87,
"grad_norm": 0.8995974659919739,
"learning_rate": 5.6422995031937545e-05,
"loss": 0.7454,
"step": 2025
},
{
"epoch": 2.87,
"grad_norm": 0.9525470733642578,
"learning_rate": 5.635202271114266e-05,
"loss": 0.7669,
"step": 2026
},
{
"epoch": 2.87,
"grad_norm": 0.9079092144966125,
"learning_rate": 5.628105039034777e-05,
"loss": 0.7018,
"step": 2027
},
{
"epoch": 2.87,
"grad_norm": 0.9271257519721985,
"learning_rate": 5.6210078069552876e-05,
"loss": 0.7295,
"step": 2028
},
{
"epoch": 2.87,
"grad_norm": 0.943726122379303,
"learning_rate": 5.613910574875799e-05,
"loss": 0.7472,
"step": 2029
},
{
"epoch": 2.88,
"grad_norm": 0.9313754439353943,
"learning_rate": 5.60681334279631e-05,
"loss": 0.7358,
"step": 2030
},
{
"epoch": 2.88,
"grad_norm": 0.9185075163841248,
"learning_rate": 5.599716110716821e-05,
"loss": 0.7313,
"step": 2031
},
{
"epoch": 2.88,
"grad_norm": 0.9241396188735962,
"learning_rate": 5.5926188786373313e-05,
"loss": 0.7114,
"step": 2032
},
{
"epoch": 2.88,
"grad_norm": 0.884719967842102,
"learning_rate": 5.585521646557843e-05,
"loss": 0.727,
"step": 2033
},
{
"epoch": 2.88,
"grad_norm": 0.8894933462142944,
"learning_rate": 5.578424414478354e-05,
"loss": 0.7598,
"step": 2034
},
{
"epoch": 2.88,
"grad_norm": 0.8764230608940125,
"learning_rate": 5.5713271823988645e-05,
"loss": 0.7433,
"step": 2035
},
{
"epoch": 2.88,
"grad_norm": 0.8621907234191895,
"learning_rate": 5.5642299503193764e-05,
"loss": 0.7148,
"step": 2036
},
{
"epoch": 2.89,
"grad_norm": 0.8671466112136841,
"learning_rate": 5.557132718239887e-05,
"loss": 0.7362,
"step": 2037
},
{
"epoch": 2.89,
"grad_norm": 0.875935435295105,
"learning_rate": 5.5500354861603976e-05,
"loss": 0.7166,
"step": 2038
},
{
"epoch": 2.89,
"grad_norm": 0.9075550436973572,
"learning_rate": 5.542938254080908e-05,
"loss": 0.7744,
"step": 2039
},
{
"epoch": 2.89,
"grad_norm": 0.9083443880081177,
"learning_rate": 5.53584102200142e-05,
"loss": 0.709,
"step": 2040
},
{
"epoch": 2.89,
"grad_norm": 0.8900015950202942,
"learning_rate": 5.528743789921931e-05,
"loss": 0.7487,
"step": 2041
},
{
"epoch": 2.89,
"grad_norm": 0.9004586338996887,
"learning_rate": 5.5216465578424414e-05,
"loss": 0.7373,
"step": 2042
},
{
"epoch": 2.89,
"grad_norm": 0.9120739698410034,
"learning_rate": 5.514549325762953e-05,
"loss": 0.7307,
"step": 2043
},
{
"epoch": 2.9,
"grad_norm": 0.9269658327102661,
"learning_rate": 5.507452093683464e-05,
"loss": 0.7593,
"step": 2044
},
{
"epoch": 2.9,
"grad_norm": 0.9356985688209534,
"learning_rate": 5.5003548616039745e-05,
"loss": 0.7623,
"step": 2045
},
{
"epoch": 2.9,
"grad_norm": 0.9285359382629395,
"learning_rate": 5.493257629524485e-05,
"loss": 0.7215,
"step": 2046
},
{
"epoch": 2.9,
"grad_norm": 0.892284095287323,
"learning_rate": 5.486160397444997e-05,
"loss": 0.7151,
"step": 2047
},
{
"epoch": 2.9,
"grad_norm": 0.9217097759246826,
"learning_rate": 5.4790631653655076e-05,
"loss": 0.7354,
"step": 2048
},
{
"epoch": 2.9,
"grad_norm": 0.9596689939498901,
"learning_rate": 5.471965933286018e-05,
"loss": 0.7735,
"step": 2049
},
{
"epoch": 2.9,
"grad_norm": 0.9006109237670898,
"learning_rate": 5.464868701206529e-05,
"loss": 0.6733,
"step": 2050
},
{
"epoch": 2.91,
"grad_norm": 0.9174147248268127,
"learning_rate": 5.457771469127041e-05,
"loss": 0.7423,
"step": 2051
},
{
"epoch": 2.91,
"grad_norm": 0.944945752620697,
"learning_rate": 5.4506742370475514e-05,
"loss": 0.7928,
"step": 2052
},
{
"epoch": 2.91,
"grad_norm": 0.8980944156646729,
"learning_rate": 5.4435770049680626e-05,
"loss": 0.6827,
"step": 2053
},
{
"epoch": 2.91,
"grad_norm": 0.9077931642532349,
"learning_rate": 5.436479772888574e-05,
"loss": 0.7515,
"step": 2054
},
{
"epoch": 2.91,
"grad_norm": 0.8934177756309509,
"learning_rate": 5.4293825408090845e-05,
"loss": 0.7314,
"step": 2055
},
{
"epoch": 2.91,
"grad_norm": 0.8950974345207214,
"learning_rate": 5.422285308729596e-05,
"loss": 0.7644,
"step": 2056
},
{
"epoch": 2.91,
"grad_norm": 0.9017379283905029,
"learning_rate": 5.4151880766501064e-05,
"loss": 0.7599,
"step": 2057
},
{
"epoch": 2.92,
"grad_norm": 0.8867719173431396,
"learning_rate": 5.4080908445706177e-05,
"loss": 0.7247,
"step": 2058
},
{
"epoch": 2.92,
"grad_norm": 0.8819170594215393,
"learning_rate": 5.400993612491129e-05,
"loss": 0.7039,
"step": 2059
},
{
"epoch": 2.92,
"grad_norm": 0.9082474708557129,
"learning_rate": 5.3938963804116395e-05,
"loss": 0.7261,
"step": 2060
},
{
"epoch": 2.92,
"grad_norm": 0.8912190198898315,
"learning_rate": 5.3867991483321515e-05,
"loss": 0.758,
"step": 2061
},
{
"epoch": 2.92,
"grad_norm": 1.0254679918289185,
"learning_rate": 5.379701916252662e-05,
"loss": 0.7901,
"step": 2062
},
{
"epoch": 2.92,
"grad_norm": 0.8879702687263489,
"learning_rate": 5.3726046841731727e-05,
"loss": 0.7172,
"step": 2063
},
{
"epoch": 2.92,
"grad_norm": 0.8472481966018677,
"learning_rate": 5.365507452093683e-05,
"loss": 0.7248,
"step": 2064
},
{
"epoch": 2.93,
"grad_norm": 0.9291859865188599,
"learning_rate": 5.358410220014195e-05,
"loss": 0.7221,
"step": 2065
},
{
"epoch": 2.93,
"grad_norm": 0.8945410251617432,
"learning_rate": 5.351312987934706e-05,
"loss": 0.6759,
"step": 2066
},
{
"epoch": 2.93,
"grad_norm": 0.8901339769363403,
"learning_rate": 5.3442157558552164e-05,
"loss": 0.7342,
"step": 2067
},
{
"epoch": 2.93,
"grad_norm": 0.8966627717018127,
"learning_rate": 5.3371185237757283e-05,
"loss": 0.6834,
"step": 2068
},
{
"epoch": 2.93,
"grad_norm": 0.9248061180114746,
"learning_rate": 5.330021291696239e-05,
"loss": 0.7561,
"step": 2069
},
{
"epoch": 2.93,
"grad_norm": 0.9252817630767822,
"learning_rate": 5.3229240596167495e-05,
"loss": 0.7436,
"step": 2070
},
{
"epoch": 2.93,
"grad_norm": 0.888176441192627,
"learning_rate": 5.31582682753726e-05,
"loss": 0.7017,
"step": 2071
},
{
"epoch": 2.94,
"grad_norm": 0.894523561000824,
"learning_rate": 5.308729595457772e-05,
"loss": 0.7224,
"step": 2072
},
{
"epoch": 2.94,
"grad_norm": 0.9010726809501648,
"learning_rate": 5.301632363378283e-05,
"loss": 0.7276,
"step": 2073
},
{
"epoch": 2.94,
"grad_norm": 0.9328283071517944,
"learning_rate": 5.294535131298793e-05,
"loss": 0.7599,
"step": 2074
},
{
"epoch": 2.94,
"grad_norm": 0.8821666836738586,
"learning_rate": 5.287437899219305e-05,
"loss": 0.7245,
"step": 2075
},
{
"epoch": 2.94,
"grad_norm": 0.9441255927085876,
"learning_rate": 5.280340667139816e-05,
"loss": 0.7344,
"step": 2076
},
{
"epoch": 2.94,
"grad_norm": 0.9148538708686829,
"learning_rate": 5.2732434350603264e-05,
"loss": 0.7172,
"step": 2077
},
{
"epoch": 2.94,
"grad_norm": 0.895046055316925,
"learning_rate": 5.266146202980837e-05,
"loss": 0.7425,
"step": 2078
},
{
"epoch": 2.95,
"grad_norm": 0.8581205606460571,
"learning_rate": 5.259048970901349e-05,
"loss": 0.703,
"step": 2079
},
{
"epoch": 2.95,
"grad_norm": 0.8967068195343018,
"learning_rate": 5.2519517388218595e-05,
"loss": 0.7363,
"step": 2080
},
{
"epoch": 2.95,
"grad_norm": 0.8934385180473328,
"learning_rate": 5.24485450674237e-05,
"loss": 0.7344,
"step": 2081
},
{
"epoch": 2.95,
"grad_norm": 0.9267292618751526,
"learning_rate": 5.237757274662882e-05,
"loss": 0.7429,
"step": 2082
},
{
"epoch": 2.95,
"grad_norm": 0.8986665606498718,
"learning_rate": 5.230660042583393e-05,
"loss": 0.7341,
"step": 2083
},
{
"epoch": 2.95,
"grad_norm": 0.9249792098999023,
"learning_rate": 5.223562810503903e-05,
"loss": 0.723,
"step": 2084
},
{
"epoch": 2.95,
"grad_norm": 0.9367862343788147,
"learning_rate": 5.2164655784244146e-05,
"loss": 0.7556,
"step": 2085
},
{
"epoch": 2.96,
"grad_norm": 0.9350747466087341,
"learning_rate": 5.209368346344926e-05,
"loss": 0.7687,
"step": 2086
},
{
"epoch": 2.96,
"grad_norm": 0.9194963574409485,
"learning_rate": 5.202271114265437e-05,
"loss": 0.7667,
"step": 2087
},
{
"epoch": 2.96,
"grad_norm": 0.910021185874939,
"learning_rate": 5.195173882185948e-05,
"loss": 0.7181,
"step": 2088
},
{
"epoch": 2.96,
"grad_norm": 0.9334052205085754,
"learning_rate": 5.188076650106459e-05,
"loss": 0.7773,
"step": 2089
},
{
"epoch": 2.96,
"grad_norm": 0.9095138311386108,
"learning_rate": 5.18097941802697e-05,
"loss": 0.7153,
"step": 2090
},
{
"epoch": 2.96,
"grad_norm": 0.8687496781349182,
"learning_rate": 5.173882185947481e-05,
"loss": 0.7251,
"step": 2091
},
{
"epoch": 2.96,
"grad_norm": 0.889177143573761,
"learning_rate": 5.1667849538679914e-05,
"loss": 0.7084,
"step": 2092
},
{
"epoch": 2.97,
"grad_norm": 0.8826084733009338,
"learning_rate": 5.1596877217885034e-05,
"loss": 0.6988,
"step": 2093
},
{
"epoch": 2.97,
"grad_norm": 0.891457736492157,
"learning_rate": 5.152590489709014e-05,
"loss": 0.7394,
"step": 2094
},
{
"epoch": 2.97,
"grad_norm": 0.8934881091117859,
"learning_rate": 5.1454932576295246e-05,
"loss": 0.6861,
"step": 2095
},
{
"epoch": 2.97,
"grad_norm": 0.9134221076965332,
"learning_rate": 5.138396025550035e-05,
"loss": 0.7227,
"step": 2096
},
{
"epoch": 2.97,
"grad_norm": 0.8815884590148926,
"learning_rate": 5.131298793470547e-05,
"loss": 0.675,
"step": 2097
},
{
"epoch": 2.97,
"grad_norm": 0.9253513813018799,
"learning_rate": 5.124201561391058e-05,
"loss": 0.723,
"step": 2098
},
{
"epoch": 2.97,
"grad_norm": 0.9489588141441345,
"learning_rate": 5.117104329311568e-05,
"loss": 0.7302,
"step": 2099
},
{
"epoch": 2.98,
"grad_norm": 0.9437028169631958,
"learning_rate": 5.11000709723208e-05,
"loss": 0.7355,
"step": 2100
},
{
"epoch": 2.98,
"grad_norm": 0.9252711534500122,
"learning_rate": 5.102909865152591e-05,
"loss": 0.7317,
"step": 2101
},
{
"epoch": 2.98,
"grad_norm": 0.9146744608879089,
"learning_rate": 5.0958126330731014e-05,
"loss": 0.7059,
"step": 2102
},
{
"epoch": 2.98,
"grad_norm": 0.9322413206100464,
"learning_rate": 5.088715400993612e-05,
"loss": 0.7326,
"step": 2103
},
{
"epoch": 2.98,
"grad_norm": 0.9023984670639038,
"learning_rate": 5.081618168914124e-05,
"loss": 0.7999,
"step": 2104
},
{
"epoch": 2.98,
"grad_norm": 0.8638840913772583,
"learning_rate": 5.0745209368346346e-05,
"loss": 0.7179,
"step": 2105
},
{
"epoch": 2.98,
"grad_norm": 0.8496398329734802,
"learning_rate": 5.067423704755145e-05,
"loss": 0.6619,
"step": 2106
},
{
"epoch": 2.98,
"grad_norm": 0.9351739287376404,
"learning_rate": 5.060326472675657e-05,
"loss": 0.7478,
"step": 2107
},
{
"epoch": 2.99,
"grad_norm": 0.9056074619293213,
"learning_rate": 5.053229240596168e-05,
"loss": 0.7747,
"step": 2108
},
{
"epoch": 2.99,
"grad_norm": 0.8992380499839783,
"learning_rate": 5.046132008516678e-05,
"loss": 0.733,
"step": 2109
},
{
"epoch": 2.99,
"grad_norm": 0.9041831493377686,
"learning_rate": 5.039034776437189e-05,
"loss": 0.7573,
"step": 2110
},
{
"epoch": 2.99,
"grad_norm": 0.9171251058578491,
"learning_rate": 5.031937544357701e-05,
"loss": 0.7364,
"step": 2111
},
{
"epoch": 2.99,
"grad_norm": 0.9151423573493958,
"learning_rate": 5.0248403122782115e-05,
"loss": 0.7923,
"step": 2112
},
{
"epoch": 2.99,
"grad_norm": 0.8989052176475525,
"learning_rate": 5.017743080198723e-05,
"loss": 0.7144,
"step": 2113
},
{
"epoch": 2.99,
"grad_norm": 0.9141291379928589,
"learning_rate": 5.010645848119234e-05,
"loss": 0.6852,
"step": 2114
},
{
"epoch": 3.0,
"grad_norm": 0.9194472432136536,
"learning_rate": 5.0035486160397446e-05,
"loss": 0.7559,
"step": 2115
},
{
"epoch": 3.0,
"grad_norm": 0.867283284664154,
"learning_rate": 4.996451383960256e-05,
"loss": 0.7188,
"step": 2116
},
{
"epoch": 3.0,
"grad_norm": 0.9092669486999512,
"learning_rate": 4.989354151880767e-05,
"loss": 0.7543,
"step": 2117
},
{
"epoch": 3.0,
"grad_norm": 0.8685262203216553,
"learning_rate": 4.982256919801278e-05,
"loss": 0.6828,
"step": 2118
},
{
"epoch": 3.0,
"grad_norm": 0.7743335962295532,
"learning_rate": 4.975159687721789e-05,
"loss": 0.5045,
"step": 2119
},
{
"epoch": 3.0,
"grad_norm": 0.8006496429443359,
"learning_rate": 4.9680624556423e-05,
"loss": 0.5517,
"step": 2120
},
{
"epoch": 3.0,
"grad_norm": 0.8102491497993469,
"learning_rate": 4.960965223562811e-05,
"loss": 0.504,
"step": 2121
},
{
"epoch": 3.01,
"grad_norm": 0.7949885129928589,
"learning_rate": 4.953867991483322e-05,
"loss": 0.4944,
"step": 2122
},
{
"epoch": 3.01,
"grad_norm": 0.8948567509651184,
"learning_rate": 4.946770759403833e-05,
"loss": 0.4933,
"step": 2123
},
{
"epoch": 3.01,
"grad_norm": 0.9919142723083496,
"learning_rate": 4.939673527324344e-05,
"loss": 0.558,
"step": 2124
},
{
"epoch": 3.01,
"grad_norm": 1.0344558954238892,
"learning_rate": 4.9325762952448546e-05,
"loss": 0.5067,
"step": 2125
},
{
"epoch": 3.01,
"grad_norm": 1.1019103527069092,
"learning_rate": 4.925479063165366e-05,
"loss": 0.5355,
"step": 2126
},
{
"epoch": 3.01,
"grad_norm": 1.170531153678894,
"learning_rate": 4.9183818310858765e-05,
"loss": 0.5802,
"step": 2127
},
{
"epoch": 3.01,
"grad_norm": 1.1433138847351074,
"learning_rate": 4.911284599006388e-05,
"loss": 0.5597,
"step": 2128
},
{
"epoch": 3.02,
"grad_norm": 1.1154450178146362,
"learning_rate": 4.904187366926899e-05,
"loss": 0.542,
"step": 2129
},
{
"epoch": 3.02,
"grad_norm": 1.0891841650009155,
"learning_rate": 4.8970901348474096e-05,
"loss": 0.559,
"step": 2130
},
{
"epoch": 3.02,
"grad_norm": 0.9962801337242126,
"learning_rate": 4.889992902767921e-05,
"loss": 0.5171,
"step": 2131
},
{
"epoch": 3.02,
"grad_norm": 0.9601651430130005,
"learning_rate": 4.8828956706884315e-05,
"loss": 0.5508,
"step": 2132
},
{
"epoch": 3.02,
"grad_norm": 0.8857892751693726,
"learning_rate": 4.875798438608943e-05,
"loss": 0.4933,
"step": 2133
},
{
"epoch": 3.02,
"grad_norm": 0.8972840905189514,
"learning_rate": 4.8687012065294534e-05,
"loss": 0.5005,
"step": 2134
},
{
"epoch": 3.02,
"grad_norm": 0.922217071056366,
"learning_rate": 4.8616039744499646e-05,
"loss": 0.5266,
"step": 2135
},
{
"epoch": 3.03,
"grad_norm": 0.8686092495918274,
"learning_rate": 4.854506742370476e-05,
"loss": 0.5273,
"step": 2136
},
{
"epoch": 3.03,
"grad_norm": 0.901107132434845,
"learning_rate": 4.8474095102909865e-05,
"loss": 0.49,
"step": 2137
},
{
"epoch": 3.03,
"grad_norm": 0.9138848185539246,
"learning_rate": 4.840312278211498e-05,
"loss": 0.5071,
"step": 2138
},
{
"epoch": 3.03,
"grad_norm": 0.9287373423576355,
"learning_rate": 4.8332150461320084e-05,
"loss": 0.5132,
"step": 2139
},
{
"epoch": 3.03,
"grad_norm": 0.9281260371208191,
"learning_rate": 4.8261178140525196e-05,
"loss": 0.5068,
"step": 2140
},
{
"epoch": 3.03,
"grad_norm": 0.9996431469917297,
"learning_rate": 4.81902058197303e-05,
"loss": 0.497,
"step": 2141
},
{
"epoch": 3.03,
"grad_norm": 1.0466423034667969,
"learning_rate": 4.8119233498935415e-05,
"loss": 0.5328,
"step": 2142
},
{
"epoch": 3.04,
"grad_norm": 1.0136125087738037,
"learning_rate": 4.804826117814053e-05,
"loss": 0.4828,
"step": 2143
},
{
"epoch": 3.04,
"grad_norm": 1.0378624200820923,
"learning_rate": 4.7977288857345634e-05,
"loss": 0.5387,
"step": 2144
},
{
"epoch": 3.04,
"grad_norm": 1.0223605632781982,
"learning_rate": 4.7906316536550746e-05,
"loss": 0.5476,
"step": 2145
},
{
"epoch": 3.04,
"grad_norm": 1.0363613367080688,
"learning_rate": 4.783534421575586e-05,
"loss": 0.5516,
"step": 2146
},
{
"epoch": 3.04,
"grad_norm": 0.9881285429000854,
"learning_rate": 4.776437189496097e-05,
"loss": 0.5342,
"step": 2147
},
{
"epoch": 3.04,
"grad_norm": 0.989923894405365,
"learning_rate": 4.769339957416608e-05,
"loss": 0.5694,
"step": 2148
},
{
"epoch": 3.04,
"grad_norm": 0.9041772484779358,
"learning_rate": 4.762242725337119e-05,
"loss": 0.4411,
"step": 2149
},
{
"epoch": 3.05,
"grad_norm": 0.9580135941505432,
"learning_rate": 4.7551454932576297e-05,
"loss": 0.5294,
"step": 2150
},
{
"epoch": 3.05,
"grad_norm": 0.9735754132270813,
"learning_rate": 4.748048261178141e-05,
"loss": 0.5518,
"step": 2151
},
{
"epoch": 3.05,
"grad_norm": 0.9906786680221558,
"learning_rate": 4.740951029098652e-05,
"loss": 0.5644,
"step": 2152
},
{
"epoch": 3.05,
"grad_norm": 0.9299677610397339,
"learning_rate": 4.733853797019163e-05,
"loss": 0.536,
"step": 2153
},
{
"epoch": 3.05,
"grad_norm": 0.9068963527679443,
"learning_rate": 4.726756564939674e-05,
"loss": 0.4836,
"step": 2154
},
{
"epoch": 3.05,
"grad_norm": 0.9534969925880432,
"learning_rate": 4.719659332860185e-05,
"loss": 0.5291,
"step": 2155
},
{
"epoch": 3.05,
"grad_norm": 1.0046902894973755,
"learning_rate": 4.712562100780696e-05,
"loss": 0.5388,
"step": 2156
},
{
"epoch": 3.06,
"grad_norm": 0.9631446003913879,
"learning_rate": 4.7054648687012065e-05,
"loss": 0.5067,
"step": 2157
},
{
"epoch": 3.06,
"grad_norm": 1.0535892248153687,
"learning_rate": 4.698367636621718e-05,
"loss": 0.5609,
"step": 2158
},
{
"epoch": 3.06,
"grad_norm": 0.9783676266670227,
"learning_rate": 4.691270404542229e-05,
"loss": 0.5228,
"step": 2159
},
{
"epoch": 3.06,
"grad_norm": 0.9673240184783936,
"learning_rate": 4.68417317246274e-05,
"loss": 0.5479,
"step": 2160
},
{
"epoch": 3.06,
"grad_norm": 0.9668963551521301,
"learning_rate": 4.677075940383251e-05,
"loss": 0.477,
"step": 2161
},
{
"epoch": 3.06,
"grad_norm": 0.971133291721344,
"learning_rate": 4.6699787083037615e-05,
"loss": 0.4775,
"step": 2162
},
{
"epoch": 3.06,
"grad_norm": 0.9844760298728943,
"learning_rate": 4.662881476224273e-05,
"loss": 0.5123,
"step": 2163
},
{
"epoch": 3.07,
"grad_norm": 0.9454901814460754,
"learning_rate": 4.6557842441447834e-05,
"loss": 0.5169,
"step": 2164
},
{
"epoch": 3.07,
"grad_norm": 0.9617669582366943,
"learning_rate": 4.648687012065295e-05,
"loss": 0.5141,
"step": 2165
},
{
"epoch": 3.07,
"grad_norm": 0.9806207418441772,
"learning_rate": 4.641589779985806e-05,
"loss": 0.4989,
"step": 2166
},
{
"epoch": 3.07,
"grad_norm": 0.929340124130249,
"learning_rate": 4.6344925479063165e-05,
"loss": 0.5034,
"step": 2167
},
{
"epoch": 3.07,
"grad_norm": 0.9709641933441162,
"learning_rate": 4.627395315826828e-05,
"loss": 0.4999,
"step": 2168
},
{
"epoch": 3.07,
"grad_norm": 0.9410818219184875,
"learning_rate": 4.6202980837473384e-05,
"loss": 0.4847,
"step": 2169
},
{
"epoch": 3.07,
"grad_norm": 0.9140300154685974,
"learning_rate": 4.61320085166785e-05,
"loss": 0.4953,
"step": 2170
},
{
"epoch": 3.08,
"grad_norm": 0.9406004548072815,
"learning_rate": 4.60610361958836e-05,
"loss": 0.5043,
"step": 2171
},
{
"epoch": 3.08,
"grad_norm": 0.9244046211242676,
"learning_rate": 4.5990063875088716e-05,
"loss": 0.4742,
"step": 2172
},
{
"epoch": 3.08,
"grad_norm": 0.9861319661140442,
"learning_rate": 4.591909155429383e-05,
"loss": 0.5383,
"step": 2173
},
{
"epoch": 3.08,
"grad_norm": 0.9648599028587341,
"learning_rate": 4.5848119233498934e-05,
"loss": 0.4916,
"step": 2174
},
{
"epoch": 3.08,
"grad_norm": 0.9462875127792358,
"learning_rate": 4.577714691270405e-05,
"loss": 0.4533,
"step": 2175
},
{
"epoch": 3.08,
"grad_norm": 1.0142507553100586,
"learning_rate": 4.570617459190916e-05,
"loss": 0.5145,
"step": 2176
},
{
"epoch": 3.08,
"grad_norm": 0.9575462341308594,
"learning_rate": 4.563520227111427e-05,
"loss": 0.471,
"step": 2177
},
{
"epoch": 3.09,
"grad_norm": 1.0417957305908203,
"learning_rate": 4.556422995031938e-05,
"loss": 0.5373,
"step": 2178
},
{
"epoch": 3.09,
"grad_norm": 0.9894844889640808,
"learning_rate": 4.549325762952449e-05,
"loss": 0.5464,
"step": 2179
},
{
"epoch": 3.09,
"grad_norm": 0.9838773608207703,
"learning_rate": 4.54222853087296e-05,
"loss": 0.5221,
"step": 2180
},
{
"epoch": 3.09,
"grad_norm": 0.9868409633636475,
"learning_rate": 4.535131298793471e-05,
"loss": 0.5311,
"step": 2181
},
{
"epoch": 3.09,
"grad_norm": 0.944324791431427,
"learning_rate": 4.528034066713982e-05,
"loss": 0.5446,
"step": 2182
},
{
"epoch": 3.09,
"grad_norm": 0.9434787034988403,
"learning_rate": 4.520936834634493e-05,
"loss": 0.5004,
"step": 2183
},
{
"epoch": 3.09,
"grad_norm": 0.9857025146484375,
"learning_rate": 4.513839602555004e-05,
"loss": 0.5373,
"step": 2184
},
{
"epoch": 3.1,
"grad_norm": 0.9952114224433899,
"learning_rate": 4.506742370475515e-05,
"loss": 0.5013,
"step": 2185
},
{
"epoch": 3.1,
"grad_norm": 0.9452676177024841,
"learning_rate": 4.499645138396026e-05,
"loss": 0.5079,
"step": 2186
},
{
"epoch": 3.1,
"grad_norm": 1.0157325267791748,
"learning_rate": 4.4925479063165366e-05,
"loss": 0.5197,
"step": 2187
},
{
"epoch": 3.1,
"grad_norm": 0.9407671093940735,
"learning_rate": 4.485450674237048e-05,
"loss": 0.4895,
"step": 2188
},
{
"epoch": 3.1,
"grad_norm": 0.9767752289772034,
"learning_rate": 4.478353442157559e-05,
"loss": 0.5002,
"step": 2189
},
{
"epoch": 3.1,
"grad_norm": 0.9899623990058899,
"learning_rate": 4.47125621007807e-05,
"loss": 0.4964,
"step": 2190
},
{
"epoch": 3.1,
"grad_norm": 1.0107783079147339,
"learning_rate": 4.464158977998581e-05,
"loss": 0.4909,
"step": 2191
},
{
"epoch": 3.11,
"grad_norm": 1.00058114528656,
"learning_rate": 4.4570617459190916e-05,
"loss": 0.5369,
"step": 2192
},
{
"epoch": 3.11,
"grad_norm": 0.9973490238189697,
"learning_rate": 4.449964513839603e-05,
"loss": 0.5219,
"step": 2193
},
{
"epoch": 3.11,
"grad_norm": 0.9762208461761475,
"learning_rate": 4.4428672817601135e-05,
"loss": 0.5218,
"step": 2194
},
{
"epoch": 3.11,
"grad_norm": 0.994667112827301,
"learning_rate": 4.435770049680625e-05,
"loss": 0.5097,
"step": 2195
},
{
"epoch": 3.11,
"grad_norm": 0.9990437030792236,
"learning_rate": 4.428672817601135e-05,
"loss": 0.5359,
"step": 2196
},
{
"epoch": 3.11,
"grad_norm": 0.9898905754089355,
"learning_rate": 4.4215755855216466e-05,
"loss": 0.4987,
"step": 2197
},
{
"epoch": 3.11,
"grad_norm": 0.9653964638710022,
"learning_rate": 4.414478353442158e-05,
"loss": 0.4742,
"step": 2198
},
{
"epoch": 3.12,
"grad_norm": 0.9734763503074646,
"learning_rate": 4.4073811213626685e-05,
"loss": 0.5236,
"step": 2199
},
{
"epoch": 3.12,
"grad_norm": 0.9727082848548889,
"learning_rate": 4.40028388928318e-05,
"loss": 0.5181,
"step": 2200
},
{
"epoch": 3.12,
"grad_norm": 1.0015164613723755,
"learning_rate": 4.39318665720369e-05,
"loss": 0.502,
"step": 2201
},
{
"epoch": 3.12,
"grad_norm": 0.965710461139679,
"learning_rate": 4.3860894251242016e-05,
"loss": 0.5046,
"step": 2202
},
{
"epoch": 3.12,
"grad_norm": 0.9759023189544678,
"learning_rate": 4.378992193044712e-05,
"loss": 0.5203,
"step": 2203
},
{
"epoch": 3.12,
"grad_norm": 0.9397246837615967,
"learning_rate": 4.3718949609652235e-05,
"loss": 0.4816,
"step": 2204
},
{
"epoch": 3.12,
"grad_norm": 0.979550302028656,
"learning_rate": 4.364797728885735e-05,
"loss": 0.5214,
"step": 2205
},
{
"epoch": 3.13,
"grad_norm": 0.9628053307533264,
"learning_rate": 4.357700496806246e-05,
"loss": 0.5117,
"step": 2206
},
{
"epoch": 3.13,
"grad_norm": 1.0112930536270142,
"learning_rate": 4.3506032647267566e-05,
"loss": 0.5711,
"step": 2207
},
{
"epoch": 3.13,
"grad_norm": 1.0025320053100586,
"learning_rate": 4.343506032647268e-05,
"loss": 0.5474,
"step": 2208
},
{
"epoch": 3.13,
"grad_norm": 1.0063894987106323,
"learning_rate": 4.336408800567779e-05,
"loss": 0.5618,
"step": 2209
},
{
"epoch": 3.13,
"grad_norm": 1.0305397510528564,
"learning_rate": 4.32931156848829e-05,
"loss": 0.5585,
"step": 2210
},
{
"epoch": 3.13,
"grad_norm": 0.9876142144203186,
"learning_rate": 4.322214336408801e-05,
"loss": 0.517,
"step": 2211
},
{
"epoch": 3.13,
"grad_norm": 0.9939770102500916,
"learning_rate": 4.315117104329312e-05,
"loss": 0.4898,
"step": 2212
},
{
"epoch": 3.14,
"grad_norm": 0.9995191097259521,
"learning_rate": 4.308019872249823e-05,
"loss": 0.5408,
"step": 2213
},
{
"epoch": 3.14,
"grad_norm": 1.0050206184387207,
"learning_rate": 4.300922640170334e-05,
"loss": 0.5386,
"step": 2214
},
{
"epoch": 3.14,
"grad_norm": 0.9904208183288574,
"learning_rate": 4.293825408090845e-05,
"loss": 0.5028,
"step": 2215
},
{
"epoch": 3.14,
"grad_norm": 1.012172818183899,
"learning_rate": 4.286728176011356e-05,
"loss": 0.5367,
"step": 2216
},
{
"epoch": 3.14,
"grad_norm": 0.981663703918457,
"learning_rate": 4.2796309439318666e-05,
"loss": 0.5185,
"step": 2217
},
{
"epoch": 3.14,
"grad_norm": 0.9844231009483337,
"learning_rate": 4.272533711852378e-05,
"loss": 0.5212,
"step": 2218
},
{
"epoch": 3.14,
"grad_norm": 0.9846050143241882,
"learning_rate": 4.2654364797728885e-05,
"loss": 0.5427,
"step": 2219
},
{
"epoch": 3.15,
"grad_norm": 0.9734468460083008,
"learning_rate": 4.2583392476934e-05,
"loss": 0.5248,
"step": 2220
},
{
"epoch": 3.15,
"grad_norm": 0.9388629794120789,
"learning_rate": 4.251242015613911e-05,
"loss": 0.4593,
"step": 2221
},
{
"epoch": 3.15,
"grad_norm": 0.9485184550285339,
"learning_rate": 4.2441447835344216e-05,
"loss": 0.4847,
"step": 2222
},
{
"epoch": 3.15,
"grad_norm": 1.0168582201004028,
"learning_rate": 4.237047551454933e-05,
"loss": 0.5204,
"step": 2223
},
{
"epoch": 3.15,
"grad_norm": 0.9456900358200073,
"learning_rate": 4.2299503193754435e-05,
"loss": 0.4953,
"step": 2224
},
{
"epoch": 3.15,
"grad_norm": 1.0003365278244019,
"learning_rate": 4.222853087295955e-05,
"loss": 0.5383,
"step": 2225
},
{
"epoch": 3.15,
"grad_norm": 1.0175565481185913,
"learning_rate": 4.2157558552164654e-05,
"loss": 0.5451,
"step": 2226
},
{
"epoch": 3.15,
"grad_norm": 1.0304280519485474,
"learning_rate": 4.2086586231369766e-05,
"loss": 0.5467,
"step": 2227
},
{
"epoch": 3.16,
"grad_norm": 0.9860724806785583,
"learning_rate": 4.201561391057488e-05,
"loss": 0.5399,
"step": 2228
},
{
"epoch": 3.16,
"grad_norm": 0.9857328534126282,
"learning_rate": 4.1944641589779985e-05,
"loss": 0.5208,
"step": 2229
},
{
"epoch": 3.16,
"grad_norm": 1.07256281375885,
"learning_rate": 4.18736692689851e-05,
"loss": 0.5466,
"step": 2230
},
{
"epoch": 3.16,
"grad_norm": 1.0171128511428833,
"learning_rate": 4.1802696948190204e-05,
"loss": 0.5309,
"step": 2231
},
{
"epoch": 3.16,
"grad_norm": 1.0043413639068604,
"learning_rate": 4.1731724627395316e-05,
"loss": 0.518,
"step": 2232
},
{
"epoch": 3.16,
"grad_norm": 1.0043971538543701,
"learning_rate": 4.166075230660042e-05,
"loss": 0.5515,
"step": 2233
},
{
"epoch": 3.16,
"grad_norm": 1.00337553024292,
"learning_rate": 4.1589779985805535e-05,
"loss": 0.5425,
"step": 2234
},
{
"epoch": 3.17,
"grad_norm": 1.0087389945983887,
"learning_rate": 4.151880766501065e-05,
"loss": 0.5237,
"step": 2235
},
{
"epoch": 3.17,
"grad_norm": 1.0248241424560547,
"learning_rate": 4.144783534421576e-05,
"loss": 0.5685,
"step": 2236
},
{
"epoch": 3.17,
"grad_norm": 1.0191607475280762,
"learning_rate": 4.1376863023420867e-05,
"loss": 0.5436,
"step": 2237
},
{
"epoch": 3.17,
"grad_norm": 0.9885305762290955,
"learning_rate": 4.130589070262598e-05,
"loss": 0.5082,
"step": 2238
},
{
"epoch": 3.17,
"grad_norm": 0.9865046739578247,
"learning_rate": 4.123491838183109e-05,
"loss": 0.5384,
"step": 2239
},
{
"epoch": 3.17,
"grad_norm": 0.984719455242157,
"learning_rate": 4.11639460610362e-05,
"loss": 0.5017,
"step": 2240
},
{
"epoch": 3.17,
"grad_norm": 0.9513193964958191,
"learning_rate": 4.109297374024131e-05,
"loss": 0.497,
"step": 2241
},
{
"epoch": 3.18,
"grad_norm": 1.0050876140594482,
"learning_rate": 4.102200141944642e-05,
"loss": 0.598,
"step": 2242
},
{
"epoch": 3.18,
"grad_norm": 0.9792728424072266,
"learning_rate": 4.095102909865153e-05,
"loss": 0.4996,
"step": 2243
},
{
"epoch": 3.18,
"grad_norm": 1.059917688369751,
"learning_rate": 4.088005677785664e-05,
"loss": 0.5584,
"step": 2244
},
{
"epoch": 3.18,
"grad_norm": 0.962172269821167,
"learning_rate": 4.080908445706175e-05,
"loss": 0.485,
"step": 2245
},
{
"epoch": 3.18,
"grad_norm": 0.9715234637260437,
"learning_rate": 4.073811213626686e-05,
"loss": 0.4752,
"step": 2246
},
{
"epoch": 3.18,
"grad_norm": 1.0108722448349,
"learning_rate": 4.066713981547197e-05,
"loss": 0.5053,
"step": 2247
},
{
"epoch": 3.18,
"grad_norm": 0.9890355467796326,
"learning_rate": 4.059616749467708e-05,
"loss": 0.514,
"step": 2248
},
{
"epoch": 3.19,
"grad_norm": 0.966625988483429,
"learning_rate": 4.0525195173882185e-05,
"loss": 0.4707,
"step": 2249
},
{
"epoch": 3.19,
"grad_norm": 1.0721231698989868,
"learning_rate": 4.04542228530873e-05,
"loss": 0.5867,
"step": 2250
},
{
"epoch": 3.19,
"grad_norm": 0.9937222003936768,
"learning_rate": 4.038325053229241e-05,
"loss": 0.4945,
"step": 2251
},
{
"epoch": 3.19,
"grad_norm": 1.0190373659133911,
"learning_rate": 4.031227821149752e-05,
"loss": 0.5281,
"step": 2252
},
{
"epoch": 3.19,
"grad_norm": 1.028527855873108,
"learning_rate": 4.024130589070263e-05,
"loss": 0.5356,
"step": 2253
},
{
"epoch": 3.19,
"grad_norm": 0.9732937216758728,
"learning_rate": 4.0170333569907735e-05,
"loss": 0.5344,
"step": 2254
},
{
"epoch": 3.19,
"grad_norm": 0.9506965279579163,
"learning_rate": 4.009936124911285e-05,
"loss": 0.498,
"step": 2255
},
{
"epoch": 3.2,
"grad_norm": 1.0024874210357666,
"learning_rate": 4.0028388928317954e-05,
"loss": 0.5234,
"step": 2256
},
{
"epoch": 3.2,
"grad_norm": 0.9432622790336609,
"learning_rate": 3.995741660752307e-05,
"loss": 0.5332,
"step": 2257
},
{
"epoch": 3.2,
"grad_norm": 0.9941532015800476,
"learning_rate": 3.988644428672818e-05,
"loss": 0.5044,
"step": 2258
},
{
"epoch": 3.2,
"grad_norm": 1.0081837177276611,
"learning_rate": 3.9815471965933286e-05,
"loss": 0.5043,
"step": 2259
},
{
"epoch": 3.2,
"grad_norm": 1.062863826751709,
"learning_rate": 3.97444996451384e-05,
"loss": 0.5296,
"step": 2260
},
{
"epoch": 3.2,
"grad_norm": 0.9733378291130066,
"learning_rate": 3.9673527324343504e-05,
"loss": 0.5047,
"step": 2261
},
{
"epoch": 3.2,
"grad_norm": 0.983255922794342,
"learning_rate": 3.960255500354862e-05,
"loss": 0.4895,
"step": 2262
},
{
"epoch": 3.21,
"grad_norm": 0.9561431407928467,
"learning_rate": 3.953158268275372e-05,
"loss": 0.4546,
"step": 2263
},
{
"epoch": 3.21,
"grad_norm": 1.0252975225448608,
"learning_rate": 3.9460610361958836e-05,
"loss": 0.5129,
"step": 2264
},
{
"epoch": 3.21,
"grad_norm": 0.9920056462287903,
"learning_rate": 3.938963804116395e-05,
"loss": 0.5163,
"step": 2265
},
{
"epoch": 3.21,
"grad_norm": 1.0454301834106445,
"learning_rate": 3.931866572036906e-05,
"loss": 0.5528,
"step": 2266
},
{
"epoch": 3.21,
"grad_norm": 0.9582340717315674,
"learning_rate": 3.924769339957417e-05,
"loss": 0.5108,
"step": 2267
},
{
"epoch": 3.21,
"grad_norm": 1.0326735973358154,
"learning_rate": 3.917672107877928e-05,
"loss": 0.5287,
"step": 2268
},
{
"epoch": 3.21,
"grad_norm": 0.9848905801773071,
"learning_rate": 3.910574875798439e-05,
"loss": 0.5271,
"step": 2269
},
{
"epoch": 3.22,
"grad_norm": 0.949379563331604,
"learning_rate": 3.90347764371895e-05,
"loss": 0.4887,
"step": 2270
},
{
"epoch": 3.22,
"grad_norm": 0.9528018236160278,
"learning_rate": 3.896380411639461e-05,
"loss": 0.4874,
"step": 2271
},
{
"epoch": 3.22,
"grad_norm": 0.96091228723526,
"learning_rate": 3.889283179559972e-05,
"loss": 0.5189,
"step": 2272
},
{
"epoch": 3.22,
"grad_norm": 1.0250592231750488,
"learning_rate": 3.882185947480483e-05,
"loss": 0.5368,
"step": 2273
},
{
"epoch": 3.22,
"grad_norm": 1.0342363119125366,
"learning_rate": 3.875088715400994e-05,
"loss": 0.5076,
"step": 2274
},
{
"epoch": 3.22,
"grad_norm": 0.9662345051765442,
"learning_rate": 3.867991483321505e-05,
"loss": 0.5021,
"step": 2275
},
{
"epoch": 3.22,
"grad_norm": 1.0056486129760742,
"learning_rate": 3.860894251242016e-05,
"loss": 0.5173,
"step": 2276
},
{
"epoch": 3.23,
"grad_norm": 0.9594873785972595,
"learning_rate": 3.853797019162527e-05,
"loss": 0.498,
"step": 2277
},
{
"epoch": 3.23,
"grad_norm": 1.001089096069336,
"learning_rate": 3.846699787083038e-05,
"loss": 0.5125,
"step": 2278
},
{
"epoch": 3.23,
"grad_norm": 1.0407207012176514,
"learning_rate": 3.8396025550035486e-05,
"loss": 0.5567,
"step": 2279
},
{
"epoch": 3.23,
"grad_norm": 0.9708674550056458,
"learning_rate": 3.83250532292406e-05,
"loss": 0.4893,
"step": 2280
},
{
"epoch": 3.23,
"grad_norm": 0.9955769181251526,
"learning_rate": 3.8254080908445704e-05,
"loss": 0.4873,
"step": 2281
},
{
"epoch": 3.23,
"grad_norm": 0.9997064471244812,
"learning_rate": 3.818310858765082e-05,
"loss": 0.5166,
"step": 2282
},
{
"epoch": 3.23,
"grad_norm": 0.9996336698532104,
"learning_rate": 3.811213626685593e-05,
"loss": 0.5054,
"step": 2283
},
{
"epoch": 3.24,
"grad_norm": 0.9845335483551025,
"learning_rate": 3.8041163946061036e-05,
"loss": 0.4903,
"step": 2284
},
{
"epoch": 3.24,
"grad_norm": 1.0074700117111206,
"learning_rate": 3.797019162526615e-05,
"loss": 0.5121,
"step": 2285
},
{
"epoch": 3.24,
"grad_norm": 0.9627556800842285,
"learning_rate": 3.7899219304471255e-05,
"loss": 0.5175,
"step": 2286
},
{
"epoch": 3.24,
"grad_norm": 1.0189731121063232,
"learning_rate": 3.782824698367637e-05,
"loss": 0.5354,
"step": 2287
},
{
"epoch": 3.24,
"grad_norm": 1.014862060546875,
"learning_rate": 3.775727466288147e-05,
"loss": 0.5459,
"step": 2288
},
{
"epoch": 3.24,
"grad_norm": 0.9731964468955994,
"learning_rate": 3.7686302342086586e-05,
"loss": 0.5227,
"step": 2289
},
{
"epoch": 3.24,
"grad_norm": 0.9890885353088379,
"learning_rate": 3.76153300212917e-05,
"loss": 0.5083,
"step": 2290
},
{
"epoch": 3.25,
"grad_norm": 0.9976668357849121,
"learning_rate": 3.7544357700496805e-05,
"loss": 0.5373,
"step": 2291
},
{
"epoch": 3.25,
"grad_norm": 1.04860258102417,
"learning_rate": 3.747338537970192e-05,
"loss": 0.511,
"step": 2292
},
{
"epoch": 3.25,
"grad_norm": 1.0599074363708496,
"learning_rate": 3.740241305890702e-05,
"loss": 0.5566,
"step": 2293
},
{
"epoch": 3.25,
"grad_norm": 0.9964849352836609,
"learning_rate": 3.7331440738112136e-05,
"loss": 0.5177,
"step": 2294
},
{
"epoch": 3.25,
"grad_norm": 0.9439302086830139,
"learning_rate": 3.726046841731725e-05,
"loss": 0.5044,
"step": 2295
},
{
"epoch": 3.25,
"grad_norm": 1.0093363523483276,
"learning_rate": 3.718949609652236e-05,
"loss": 0.5031,
"step": 2296
},
{
"epoch": 3.25,
"grad_norm": 0.9915724396705627,
"learning_rate": 3.711852377572747e-05,
"loss": 0.5038,
"step": 2297
},
{
"epoch": 3.26,
"grad_norm": 0.9472718834877014,
"learning_rate": 3.704755145493258e-05,
"loss": 0.4533,
"step": 2298
},
{
"epoch": 3.26,
"grad_norm": 0.9775221347808838,
"learning_rate": 3.697657913413769e-05,
"loss": 0.5127,
"step": 2299
},
{
"epoch": 3.26,
"grad_norm": 1.0333958864212036,
"learning_rate": 3.69056068133428e-05,
"loss": 0.5467,
"step": 2300
},
{
"epoch": 3.26,
"grad_norm": 1.0134251117706299,
"learning_rate": 3.683463449254791e-05,
"loss": 0.4907,
"step": 2301
},
{
"epoch": 3.26,
"grad_norm": 0.979189932346344,
"learning_rate": 3.676366217175302e-05,
"loss": 0.4739,
"step": 2302
},
{
"epoch": 3.26,
"grad_norm": 0.9691347479820251,
"learning_rate": 3.669268985095813e-05,
"loss": 0.5066,
"step": 2303
},
{
"epoch": 3.26,
"grad_norm": 1.0259438753128052,
"learning_rate": 3.6621717530163236e-05,
"loss": 0.5329,
"step": 2304
},
{
"epoch": 3.27,
"grad_norm": 1.004991054534912,
"learning_rate": 3.655074520936835e-05,
"loss": 0.5315,
"step": 2305
},
{
"epoch": 3.27,
"grad_norm": 1.0028557777404785,
"learning_rate": 3.647977288857346e-05,
"loss": 0.505,
"step": 2306
},
{
"epoch": 3.27,
"grad_norm": 1.0041579008102417,
"learning_rate": 3.640880056777857e-05,
"loss": 0.5333,
"step": 2307
},
{
"epoch": 3.27,
"grad_norm": 0.9609581828117371,
"learning_rate": 3.633782824698368e-05,
"loss": 0.5016,
"step": 2308
},
{
"epoch": 3.27,
"grad_norm": 0.9761263728141785,
"learning_rate": 3.6266855926188786e-05,
"loss": 0.5014,
"step": 2309
},
{
"epoch": 3.27,
"grad_norm": 0.9792344570159912,
"learning_rate": 3.61958836053939e-05,
"loss": 0.4831,
"step": 2310
},
{
"epoch": 3.27,
"grad_norm": 1.0584090948104858,
"learning_rate": 3.6124911284599005e-05,
"loss": 0.5732,
"step": 2311
},
{
"epoch": 3.28,
"grad_norm": 0.9740521907806396,
"learning_rate": 3.605393896380412e-05,
"loss": 0.4929,
"step": 2312
},
{
"epoch": 3.28,
"grad_norm": 0.9656211137771606,
"learning_rate": 3.598296664300923e-05,
"loss": 0.5054,
"step": 2313
},
{
"epoch": 3.28,
"grad_norm": 0.9914239048957825,
"learning_rate": 3.5911994322214336e-05,
"loss": 0.5089,
"step": 2314
},
{
"epoch": 3.28,
"grad_norm": 1.0298843383789062,
"learning_rate": 3.584102200141945e-05,
"loss": 0.544,
"step": 2315
},
{
"epoch": 3.28,
"grad_norm": 0.9930390119552612,
"learning_rate": 3.5770049680624555e-05,
"loss": 0.5594,
"step": 2316
},
{
"epoch": 3.28,
"grad_norm": 1.0042483806610107,
"learning_rate": 3.569907735982967e-05,
"loss": 0.4972,
"step": 2317
},
{
"epoch": 3.28,
"grad_norm": 0.9482210874557495,
"learning_rate": 3.5628105039034774e-05,
"loss": 0.4537,
"step": 2318
},
{
"epoch": 3.29,
"grad_norm": 1.0086231231689453,
"learning_rate": 3.5557132718239886e-05,
"loss": 0.5497,
"step": 2319
},
{
"epoch": 3.29,
"grad_norm": 1.0223073959350586,
"learning_rate": 3.5486160397445e-05,
"loss": 0.4925,
"step": 2320
},
{
"epoch": 3.29,
"grad_norm": 1.0105258226394653,
"learning_rate": 3.5415188076650105e-05,
"loss": 0.513,
"step": 2321
},
{
"epoch": 3.29,
"grad_norm": 1.0318858623504639,
"learning_rate": 3.534421575585522e-05,
"loss": 0.5573,
"step": 2322
},
{
"epoch": 3.29,
"grad_norm": 0.9838947653770447,
"learning_rate": 3.5273243435060324e-05,
"loss": 0.5053,
"step": 2323
},
{
"epoch": 3.29,
"grad_norm": 0.9841655492782593,
"learning_rate": 3.5202271114265437e-05,
"loss": 0.4869,
"step": 2324
},
{
"epoch": 3.29,
"grad_norm": 1.0124472379684448,
"learning_rate": 3.513129879347055e-05,
"loss": 0.529,
"step": 2325
},
{
"epoch": 3.3,
"grad_norm": 1.0195896625518799,
"learning_rate": 3.506032647267566e-05,
"loss": 0.5419,
"step": 2326
},
{
"epoch": 3.3,
"grad_norm": 0.9941637516021729,
"learning_rate": 3.498935415188077e-05,
"loss": 0.5149,
"step": 2327
},
{
"epoch": 3.3,
"grad_norm": 1.036328911781311,
"learning_rate": 3.491838183108588e-05,
"loss": 0.5412,
"step": 2328
},
{
"epoch": 3.3,
"grad_norm": 0.9751386046409607,
"learning_rate": 3.484740951029099e-05,
"loss": 0.5206,
"step": 2329
},
{
"epoch": 3.3,
"grad_norm": 1.0403040647506714,
"learning_rate": 3.47764371894961e-05,
"loss": 0.5609,
"step": 2330
},
{
"epoch": 3.3,
"grad_norm": 0.9578166604042053,
"learning_rate": 3.470546486870121e-05,
"loss": 0.4783,
"step": 2331
},
{
"epoch": 3.3,
"grad_norm": 1.0178450345993042,
"learning_rate": 3.463449254790632e-05,
"loss": 0.531,
"step": 2332
},
{
"epoch": 3.31,
"grad_norm": 1.0271254777908325,
"learning_rate": 3.456352022711143e-05,
"loss": 0.5501,
"step": 2333
},
{
"epoch": 3.31,
"grad_norm": 0.9989926815032959,
"learning_rate": 3.449254790631654e-05,
"loss": 0.5176,
"step": 2334
},
{
"epoch": 3.31,
"grad_norm": 1.00890052318573,
"learning_rate": 3.442157558552165e-05,
"loss": 0.5352,
"step": 2335
},
{
"epoch": 3.31,
"grad_norm": 1.0696969032287598,
"learning_rate": 3.435060326472676e-05,
"loss": 0.539,
"step": 2336
},
{
"epoch": 3.31,
"grad_norm": 1.0061776638031006,
"learning_rate": 3.427963094393187e-05,
"loss": 0.52,
"step": 2337
},
{
"epoch": 3.31,
"grad_norm": 1.0269782543182373,
"learning_rate": 3.420865862313698e-05,
"loss": 0.5162,
"step": 2338
},
{
"epoch": 3.31,
"grad_norm": 0.9664796590805054,
"learning_rate": 3.413768630234209e-05,
"loss": 0.4884,
"step": 2339
},
{
"epoch": 3.32,
"grad_norm": 0.9960459470748901,
"learning_rate": 3.40667139815472e-05,
"loss": 0.5062,
"step": 2340
},
{
"epoch": 3.32,
"grad_norm": 1.0411757230758667,
"learning_rate": 3.3995741660752305e-05,
"loss": 0.5715,
"step": 2341
},
{
"epoch": 3.32,
"grad_norm": 0.9852669835090637,
"learning_rate": 3.392476933995742e-05,
"loss": 0.4707,
"step": 2342
},
{
"epoch": 3.32,
"grad_norm": 1.0226832628250122,
"learning_rate": 3.385379701916253e-05,
"loss": 0.5318,
"step": 2343
},
{
"epoch": 3.32,
"grad_norm": 1.077976107597351,
"learning_rate": 3.378282469836764e-05,
"loss": 0.5557,
"step": 2344
},
{
"epoch": 3.32,
"grad_norm": 1.0263750553131104,
"learning_rate": 3.371185237757275e-05,
"loss": 0.5563,
"step": 2345
},
{
"epoch": 3.32,
"grad_norm": 1.007447600364685,
"learning_rate": 3.3640880056777855e-05,
"loss": 0.5036,
"step": 2346
},
{
"epoch": 3.32,
"grad_norm": 0.9899296760559082,
"learning_rate": 3.356990773598297e-05,
"loss": 0.5017,
"step": 2347
},
{
"epoch": 3.33,
"grad_norm": 0.9798328876495361,
"learning_rate": 3.3498935415188074e-05,
"loss": 0.5436,
"step": 2348
},
{
"epoch": 3.33,
"grad_norm": 0.9951174259185791,
"learning_rate": 3.342796309439319e-05,
"loss": 0.5456,
"step": 2349
},
{
"epoch": 3.33,
"grad_norm": 1.0041948556900024,
"learning_rate": 3.335699077359829e-05,
"loss": 0.5188,
"step": 2350
},
{
"epoch": 3.33,
"grad_norm": 1.0109312534332275,
"learning_rate": 3.3286018452803406e-05,
"loss": 0.5191,
"step": 2351
},
{
"epoch": 3.33,
"grad_norm": 0.9915183186531067,
"learning_rate": 3.321504613200852e-05,
"loss": 0.532,
"step": 2352
},
{
"epoch": 3.33,
"grad_norm": 1.0262149572372437,
"learning_rate": 3.3144073811213624e-05,
"loss": 0.5295,
"step": 2353
},
{
"epoch": 3.33,
"grad_norm": 1.0315930843353271,
"learning_rate": 3.307310149041874e-05,
"loss": 0.5774,
"step": 2354
},
{
"epoch": 3.34,
"grad_norm": 1.0199474096298218,
"learning_rate": 3.300212916962385e-05,
"loss": 0.5729,
"step": 2355
},
{
"epoch": 3.34,
"grad_norm": 0.9959515333175659,
"learning_rate": 3.2931156848828956e-05,
"loss": 0.5117,
"step": 2356
},
{
"epoch": 3.34,
"grad_norm": 0.9782159328460693,
"learning_rate": 3.286018452803407e-05,
"loss": 0.4816,
"step": 2357
},
{
"epoch": 3.34,
"grad_norm": 1.0041710138320923,
"learning_rate": 3.278921220723918e-05,
"loss": 0.5353,
"step": 2358
},
{
"epoch": 3.34,
"grad_norm": 1.0350843667984009,
"learning_rate": 3.2718239886444294e-05,
"loss": 0.5499,
"step": 2359
},
{
"epoch": 3.34,
"grad_norm": 1.0022941827774048,
"learning_rate": 3.26472675656494e-05,
"loss": 0.5387,
"step": 2360
},
{
"epoch": 3.34,
"grad_norm": 0.978432834148407,
"learning_rate": 3.257629524485451e-05,
"loss": 0.5186,
"step": 2361
},
{
"epoch": 3.35,
"grad_norm": 0.9733918309211731,
"learning_rate": 3.250532292405962e-05,
"loss": 0.5027,
"step": 2362
},
{
"epoch": 3.35,
"grad_norm": 0.9844115376472473,
"learning_rate": 3.243435060326473e-05,
"loss": 0.5145,
"step": 2363
},
{
"epoch": 3.35,
"grad_norm": 0.9787808656692505,
"learning_rate": 3.236337828246984e-05,
"loss": 0.4654,
"step": 2364
},
{
"epoch": 3.35,
"grad_norm": 1.0499181747436523,
"learning_rate": 3.229240596167495e-05,
"loss": 0.5723,
"step": 2365
},
{
"epoch": 3.35,
"grad_norm": 0.9862911701202393,
"learning_rate": 3.222143364088006e-05,
"loss": 0.4858,
"step": 2366
},
{
"epoch": 3.35,
"grad_norm": 1.0450081825256348,
"learning_rate": 3.215046132008517e-05,
"loss": 0.5636,
"step": 2367
},
{
"epoch": 3.35,
"grad_norm": 1.0021899938583374,
"learning_rate": 3.207948899929028e-05,
"loss": 0.5229,
"step": 2368
},
{
"epoch": 3.36,
"grad_norm": 0.9917883276939392,
"learning_rate": 3.200851667849539e-05,
"loss": 0.5194,
"step": 2369
},
{
"epoch": 3.36,
"grad_norm": 1.058908462524414,
"learning_rate": 3.19375443577005e-05,
"loss": 0.5492,
"step": 2370
},
{
"epoch": 3.36,
"grad_norm": 1.0056339502334595,
"learning_rate": 3.1866572036905606e-05,
"loss": 0.5112,
"step": 2371
},
{
"epoch": 3.36,
"grad_norm": 1.0141656398773193,
"learning_rate": 3.179559971611072e-05,
"loss": 0.5233,
"step": 2372
},
{
"epoch": 3.36,
"grad_norm": 0.9797987937927246,
"learning_rate": 3.1724627395315825e-05,
"loss": 0.4747,
"step": 2373
},
{
"epoch": 3.36,
"grad_norm": 1.0097317695617676,
"learning_rate": 3.165365507452094e-05,
"loss": 0.5095,
"step": 2374
},
{
"epoch": 3.36,
"grad_norm": 1.0642932653427124,
"learning_rate": 3.158268275372605e-05,
"loss": 0.5635,
"step": 2375
},
{
"epoch": 3.37,
"grad_norm": 0.9883024096488953,
"learning_rate": 3.1511710432931156e-05,
"loss": 0.5196,
"step": 2376
},
{
"epoch": 3.37,
"grad_norm": 1.0617568492889404,
"learning_rate": 3.144073811213627e-05,
"loss": 0.5688,
"step": 2377
},
{
"epoch": 3.37,
"grad_norm": 0.9752853512763977,
"learning_rate": 3.1369765791341375e-05,
"loss": 0.5124,
"step": 2378
},
{
"epoch": 3.37,
"grad_norm": 1.0084364414215088,
"learning_rate": 3.129879347054649e-05,
"loss": 0.5194,
"step": 2379
},
{
"epoch": 3.37,
"grad_norm": 1.0282278060913086,
"learning_rate": 3.122782114975159e-05,
"loss": 0.5088,
"step": 2380
},
{
"epoch": 3.37,
"grad_norm": 1.0256778001785278,
"learning_rate": 3.1156848828956706e-05,
"loss": 0.5544,
"step": 2381
},
{
"epoch": 3.37,
"grad_norm": 1.0465688705444336,
"learning_rate": 3.108587650816182e-05,
"loss": 0.529,
"step": 2382
},
{
"epoch": 3.38,
"grad_norm": 1.0042644739151,
"learning_rate": 3.1014904187366925e-05,
"loss": 0.5066,
"step": 2383
},
{
"epoch": 3.38,
"grad_norm": 1.0506339073181152,
"learning_rate": 3.094393186657204e-05,
"loss": 0.5395,
"step": 2384
},
{
"epoch": 3.38,
"grad_norm": 0.9986273050308228,
"learning_rate": 3.087295954577715e-05,
"loss": 0.4854,
"step": 2385
},
{
"epoch": 3.38,
"grad_norm": 0.9996753334999084,
"learning_rate": 3.0801987224982256e-05,
"loss": 0.5069,
"step": 2386
},
{
"epoch": 3.38,
"grad_norm": 1.0293785333633423,
"learning_rate": 3.073101490418737e-05,
"loss": 0.5439,
"step": 2387
},
{
"epoch": 3.38,
"grad_norm": 1.012476921081543,
"learning_rate": 3.066004258339248e-05,
"loss": 0.5267,
"step": 2388
},
{
"epoch": 3.38,
"grad_norm": 1.0480303764343262,
"learning_rate": 3.0589070262597594e-05,
"loss": 0.5875,
"step": 2389
},
{
"epoch": 3.39,
"grad_norm": 1.0067976713180542,
"learning_rate": 3.05180979418027e-05,
"loss": 0.5189,
"step": 2390
},
{
"epoch": 3.39,
"grad_norm": 0.967479407787323,
"learning_rate": 3.044712562100781e-05,
"loss": 0.4723,
"step": 2391
},
{
"epoch": 3.39,
"grad_norm": 0.9812818169593811,
"learning_rate": 3.037615330021292e-05,
"loss": 0.5152,
"step": 2392
},
{
"epoch": 3.39,
"grad_norm": 1.037245512008667,
"learning_rate": 3.0305180979418028e-05,
"loss": 0.5402,
"step": 2393
},
{
"epoch": 3.39,
"grad_norm": 1.0045421123504639,
"learning_rate": 3.0234208658623138e-05,
"loss": 0.536,
"step": 2394
},
{
"epoch": 3.39,
"grad_norm": 0.9535611867904663,
"learning_rate": 3.016323633782825e-05,
"loss": 0.4775,
"step": 2395
},
{
"epoch": 3.39,
"grad_norm": 1.007875919342041,
"learning_rate": 3.0092264017033356e-05,
"loss": 0.4937,
"step": 2396
},
{
"epoch": 3.4,
"grad_norm": 1.0232481956481934,
"learning_rate": 3.002129169623847e-05,
"loss": 0.5054,
"step": 2397
},
{
"epoch": 3.4,
"grad_norm": 1.022507905960083,
"learning_rate": 2.9950319375443582e-05,
"loss": 0.5438,
"step": 2398
},
{
"epoch": 3.4,
"grad_norm": 1.0051137208938599,
"learning_rate": 2.9879347054648688e-05,
"loss": 0.5085,
"step": 2399
},
{
"epoch": 3.4,
"grad_norm": 0.9932114481925964,
"learning_rate": 2.98083747338538e-05,
"loss": 0.5117,
"step": 2400
},
{
"epoch": 3.4,
"grad_norm": 1.0523576736450195,
"learning_rate": 2.9737402413058906e-05,
"loss": 0.5187,
"step": 2401
},
{
"epoch": 3.4,
"grad_norm": 1.0372650623321533,
"learning_rate": 2.966643009226402e-05,
"loss": 0.5101,
"step": 2402
},
{
"epoch": 3.4,
"grad_norm": 0.9998854398727417,
"learning_rate": 2.9595457771469125e-05,
"loss": 0.5143,
"step": 2403
},
{
"epoch": 3.41,
"grad_norm": 1.0421397686004639,
"learning_rate": 2.9524485450674238e-05,
"loss": 0.5271,
"step": 2404
},
{
"epoch": 3.41,
"grad_norm": 1.0461199283599854,
"learning_rate": 2.945351312987935e-05,
"loss": 0.5193,
"step": 2405
},
{
"epoch": 3.41,
"grad_norm": 1.0345731973648071,
"learning_rate": 2.9382540809084456e-05,
"loss": 0.5227,
"step": 2406
},
{
"epoch": 3.41,
"grad_norm": 1.0057165622711182,
"learning_rate": 2.931156848828957e-05,
"loss": 0.507,
"step": 2407
},
{
"epoch": 3.41,
"grad_norm": 1.0056483745574951,
"learning_rate": 2.924059616749468e-05,
"loss": 0.5158,
"step": 2408
},
{
"epoch": 3.41,
"grad_norm": 1.0037814378738403,
"learning_rate": 2.916962384669979e-05,
"loss": 0.5409,
"step": 2409
},
{
"epoch": 3.41,
"grad_norm": 0.9976779818534851,
"learning_rate": 2.9098651525904897e-05,
"loss": 0.5262,
"step": 2410
},
{
"epoch": 3.42,
"grad_norm": 0.9764635562896729,
"learning_rate": 2.902767920511001e-05,
"loss": 0.5068,
"step": 2411
},
{
"epoch": 3.42,
"grad_norm": 1.001948595046997,
"learning_rate": 2.8956706884315123e-05,
"loss": 0.5317,
"step": 2412
},
{
"epoch": 3.42,
"grad_norm": 0.9676224589347839,
"learning_rate": 2.888573456352023e-05,
"loss": 0.499,
"step": 2413
},
{
"epoch": 3.42,
"grad_norm": 1.027353286743164,
"learning_rate": 2.881476224272534e-05,
"loss": 0.5309,
"step": 2414
},
{
"epoch": 3.42,
"grad_norm": 0.9962853193283081,
"learning_rate": 2.8743789921930447e-05,
"loss": 0.5111,
"step": 2415
},
{
"epoch": 3.42,
"grad_norm": 0.9848844408988953,
"learning_rate": 2.867281760113556e-05,
"loss": 0.5067,
"step": 2416
},
{
"epoch": 3.42,
"grad_norm": 0.9875693321228027,
"learning_rate": 2.8601845280340666e-05,
"loss": 0.5029,
"step": 2417
},
{
"epoch": 3.43,
"grad_norm": 0.9913346171379089,
"learning_rate": 2.853087295954578e-05,
"loss": 0.5344,
"step": 2418
},
{
"epoch": 3.43,
"grad_norm": 0.9555786848068237,
"learning_rate": 2.8459900638750885e-05,
"loss": 0.4685,
"step": 2419
},
{
"epoch": 3.43,
"grad_norm": 1.03782057762146,
"learning_rate": 2.8388928317955997e-05,
"loss": 0.5803,
"step": 2420
},
{
"epoch": 3.43,
"grad_norm": 1.0548306703567505,
"learning_rate": 2.831795599716111e-05,
"loss": 0.5648,
"step": 2421
},
{
"epoch": 3.43,
"grad_norm": 1.0190414190292358,
"learning_rate": 2.824698367636622e-05,
"loss": 0.5182,
"step": 2422
},
{
"epoch": 3.43,
"grad_norm": 1.0644923448562622,
"learning_rate": 2.817601135557133e-05,
"loss": 0.5258,
"step": 2423
},
{
"epoch": 3.43,
"grad_norm": 1.0226292610168457,
"learning_rate": 2.8105039034776438e-05,
"loss": 0.5299,
"step": 2424
},
{
"epoch": 3.44,
"grad_norm": 1.0438193082809448,
"learning_rate": 2.803406671398155e-05,
"loss": 0.5171,
"step": 2425
},
{
"epoch": 3.44,
"grad_norm": 1.064306378364563,
"learning_rate": 2.7963094393186657e-05,
"loss": 0.5664,
"step": 2426
},
{
"epoch": 3.44,
"grad_norm": 0.9808685183525085,
"learning_rate": 2.789212207239177e-05,
"loss": 0.4799,
"step": 2427
},
{
"epoch": 3.44,
"grad_norm": 1.047814965248108,
"learning_rate": 2.7821149751596882e-05,
"loss": 0.5405,
"step": 2428
},
{
"epoch": 3.44,
"grad_norm": 0.992023229598999,
"learning_rate": 2.7750177430801988e-05,
"loss": 0.5017,
"step": 2429
},
{
"epoch": 3.44,
"grad_norm": 1.0322489738464355,
"learning_rate": 2.76792051100071e-05,
"loss": 0.5467,
"step": 2430
},
{
"epoch": 3.44,
"grad_norm": 0.9788199663162231,
"learning_rate": 2.7608232789212207e-05,
"loss": 0.4835,
"step": 2431
},
{
"epoch": 3.45,
"grad_norm": 1.0321805477142334,
"learning_rate": 2.753726046841732e-05,
"loss": 0.4991,
"step": 2432
},
{
"epoch": 3.45,
"grad_norm": 1.0222502946853638,
"learning_rate": 2.7466288147622425e-05,
"loss": 0.5144,
"step": 2433
},
{
"epoch": 3.45,
"grad_norm": 1.042504072189331,
"learning_rate": 2.7395315826827538e-05,
"loss": 0.5507,
"step": 2434
},
{
"epoch": 3.45,
"grad_norm": 1.035918116569519,
"learning_rate": 2.7324343506032644e-05,
"loss": 0.5504,
"step": 2435
},
{
"epoch": 3.45,
"grad_norm": 1.0212652683258057,
"learning_rate": 2.7253371185237757e-05,
"loss": 0.5284,
"step": 2436
},
{
"epoch": 3.45,
"grad_norm": 1.0973209142684937,
"learning_rate": 2.718239886444287e-05,
"loss": 0.5614,
"step": 2437
},
{
"epoch": 3.45,
"grad_norm": 0.9826099276542664,
"learning_rate": 2.711142654364798e-05,
"loss": 0.4764,
"step": 2438
},
{
"epoch": 3.46,
"grad_norm": 1.0115211009979248,
"learning_rate": 2.7040454222853088e-05,
"loss": 0.5199,
"step": 2439
},
{
"epoch": 3.46,
"grad_norm": 1.0676625967025757,
"learning_rate": 2.6969481902058198e-05,
"loss": 0.5502,
"step": 2440
},
{
"epoch": 3.46,
"grad_norm": 1.050166130065918,
"learning_rate": 2.689850958126331e-05,
"loss": 0.5669,
"step": 2441
},
{
"epoch": 3.46,
"grad_norm": 1.061370849609375,
"learning_rate": 2.6827537260468416e-05,
"loss": 0.5565,
"step": 2442
},
{
"epoch": 3.46,
"grad_norm": 1.0100698471069336,
"learning_rate": 2.675656493967353e-05,
"loss": 0.5359,
"step": 2443
},
{
"epoch": 3.46,
"grad_norm": 0.9985615015029907,
"learning_rate": 2.6685592618878642e-05,
"loss": 0.5417,
"step": 2444
},
{
"epoch": 3.46,
"grad_norm": 1.0049725770950317,
"learning_rate": 2.6614620298083748e-05,
"loss": 0.4995,
"step": 2445
},
{
"epoch": 3.47,
"grad_norm": 0.973824143409729,
"learning_rate": 2.654364797728886e-05,
"loss": 0.4978,
"step": 2446
},
{
"epoch": 3.47,
"grad_norm": 1.0502867698669434,
"learning_rate": 2.6472675656493966e-05,
"loss": 0.5296,
"step": 2447
},
{
"epoch": 3.47,
"grad_norm": 0.9943152070045471,
"learning_rate": 2.640170333569908e-05,
"loss": 0.5324,
"step": 2448
},
{
"epoch": 3.47,
"grad_norm": 1.0061664581298828,
"learning_rate": 2.6330731014904185e-05,
"loss": 0.4898,
"step": 2449
},
{
"epoch": 3.47,
"grad_norm": 0.9694445133209229,
"learning_rate": 2.6259758694109298e-05,
"loss": 0.4816,
"step": 2450
},
{
"epoch": 3.47,
"grad_norm": 1.0240046977996826,
"learning_rate": 2.618878637331441e-05,
"loss": 0.5127,
"step": 2451
},
{
"epoch": 3.47,
"grad_norm": 1.0239776372909546,
"learning_rate": 2.6117814052519516e-05,
"loss": 0.5422,
"step": 2452
},
{
"epoch": 3.48,
"grad_norm": 1.0031204223632812,
"learning_rate": 2.604684173172463e-05,
"loss": 0.5236,
"step": 2453
},
{
"epoch": 3.48,
"grad_norm": 0.9446493983268738,
"learning_rate": 2.597586941092974e-05,
"loss": 0.4619,
"step": 2454
},
{
"epoch": 3.48,
"grad_norm": 1.073948621749878,
"learning_rate": 2.590489709013485e-05,
"loss": 0.5362,
"step": 2455
},
{
"epoch": 3.48,
"grad_norm": 0.9938516616821289,
"learning_rate": 2.5833924769339957e-05,
"loss": 0.5062,
"step": 2456
},
{
"epoch": 3.48,
"grad_norm": 1.0002453327178955,
"learning_rate": 2.576295244854507e-05,
"loss": 0.5083,
"step": 2457
},
{
"epoch": 3.48,
"grad_norm": 1.0422013998031616,
"learning_rate": 2.5691980127750176e-05,
"loss": 0.5477,
"step": 2458
},
{
"epoch": 3.48,
"grad_norm": 0.9741196632385254,
"learning_rate": 2.562100780695529e-05,
"loss": 0.4608,
"step": 2459
},
{
"epoch": 3.49,
"grad_norm": 1.0457109212875366,
"learning_rate": 2.55500354861604e-05,
"loss": 0.5356,
"step": 2460
},
{
"epoch": 3.49,
"grad_norm": 1.0058279037475586,
"learning_rate": 2.5479063165365507e-05,
"loss": 0.5145,
"step": 2461
},
{
"epoch": 3.49,
"grad_norm": 0.999752402305603,
"learning_rate": 2.540809084457062e-05,
"loss": 0.5211,
"step": 2462
},
{
"epoch": 3.49,
"grad_norm": 1.0829323530197144,
"learning_rate": 2.5337118523775726e-05,
"loss": 0.5486,
"step": 2463
},
{
"epoch": 3.49,
"grad_norm": 1.0869967937469482,
"learning_rate": 2.526614620298084e-05,
"loss": 0.5673,
"step": 2464
},
{
"epoch": 3.49,
"grad_norm": 1.0342005491256714,
"learning_rate": 2.5195173882185945e-05,
"loss": 0.5199,
"step": 2465
},
{
"epoch": 3.49,
"grad_norm": 0.9886661767959595,
"learning_rate": 2.5124201561391057e-05,
"loss": 0.4892,
"step": 2466
},
{
"epoch": 3.49,
"grad_norm": 0.9562023878097534,
"learning_rate": 2.505322924059617e-05,
"loss": 0.4768,
"step": 2467
},
{
"epoch": 3.5,
"grad_norm": 1.0431307554244995,
"learning_rate": 2.498225691980128e-05,
"loss": 0.5173,
"step": 2468
},
{
"epoch": 3.5,
"grad_norm": 0.9886699914932251,
"learning_rate": 2.491128459900639e-05,
"loss": 0.5218,
"step": 2469
},
{
"epoch": 3.5,
"grad_norm": 1.0256837606430054,
"learning_rate": 2.48403122782115e-05,
"loss": 0.5232,
"step": 2470
},
{
"epoch": 3.5,
"grad_norm": 1.0431032180786133,
"learning_rate": 2.476933995741661e-05,
"loss": 0.5279,
"step": 2471
},
{
"epoch": 3.5,
"grad_norm": 0.9854641556739807,
"learning_rate": 2.469836763662172e-05,
"loss": 0.498,
"step": 2472
},
{
"epoch": 3.5,
"grad_norm": 1.0274698734283447,
"learning_rate": 2.462739531582683e-05,
"loss": 0.5412,
"step": 2473
},
{
"epoch": 3.5,
"grad_norm": 1.0287938117980957,
"learning_rate": 2.455642299503194e-05,
"loss": 0.5629,
"step": 2474
},
{
"epoch": 3.51,
"grad_norm": 0.9638839364051819,
"learning_rate": 2.4485450674237048e-05,
"loss": 0.5243,
"step": 2475
},
{
"epoch": 3.51,
"grad_norm": 1.0268467664718628,
"learning_rate": 2.4414478353442157e-05,
"loss": 0.5069,
"step": 2476
},
{
"epoch": 3.51,
"grad_norm": 1.0005607604980469,
"learning_rate": 2.4343506032647267e-05,
"loss": 0.5016,
"step": 2477
},
{
"epoch": 3.51,
"grad_norm": 1.0163365602493286,
"learning_rate": 2.427253371185238e-05,
"loss": 0.5177,
"step": 2478
},
{
"epoch": 3.51,
"grad_norm": 1.0533087253570557,
"learning_rate": 2.420156139105749e-05,
"loss": 0.551,
"step": 2479
},
{
"epoch": 3.51,
"grad_norm": 1.117048740386963,
"learning_rate": 2.4130589070262598e-05,
"loss": 0.6015,
"step": 2480
},
{
"epoch": 3.51,
"grad_norm": 1.0275087356567383,
"learning_rate": 2.4059616749467708e-05,
"loss": 0.5362,
"step": 2481
},
{
"epoch": 3.52,
"grad_norm": 1.0228657722473145,
"learning_rate": 2.3988644428672817e-05,
"loss": 0.5356,
"step": 2482
},
{
"epoch": 3.52,
"grad_norm": 1.01448655128479,
"learning_rate": 2.391767210787793e-05,
"loss": 0.5046,
"step": 2483
},
{
"epoch": 3.52,
"grad_norm": 0.9837957620620728,
"learning_rate": 2.384669978708304e-05,
"loss": 0.4721,
"step": 2484
},
{
"epoch": 3.52,
"grad_norm": 1.0101453065872192,
"learning_rate": 2.3775727466288148e-05,
"loss": 0.5056,
"step": 2485
},
{
"epoch": 3.52,
"grad_norm": 1.0054988861083984,
"learning_rate": 2.370475514549326e-05,
"loss": 0.5063,
"step": 2486
},
{
"epoch": 3.52,
"grad_norm": 1.026909351348877,
"learning_rate": 2.363378282469837e-05,
"loss": 0.5346,
"step": 2487
},
{
"epoch": 3.52,
"grad_norm": 1.0565681457519531,
"learning_rate": 2.356281050390348e-05,
"loss": 0.5563,
"step": 2488
},
{
"epoch": 3.53,
"grad_norm": 0.9695550203323364,
"learning_rate": 2.349183818310859e-05,
"loss": 0.4942,
"step": 2489
},
{
"epoch": 3.53,
"grad_norm": 1.0256010293960571,
"learning_rate": 2.34208658623137e-05,
"loss": 0.5254,
"step": 2490
},
{
"epoch": 3.53,
"grad_norm": 0.9935940504074097,
"learning_rate": 2.3349893541518808e-05,
"loss": 0.5267,
"step": 2491
},
{
"epoch": 3.53,
"grad_norm": 1.0092248916625977,
"learning_rate": 2.3278921220723917e-05,
"loss": 0.5296,
"step": 2492
},
{
"epoch": 3.53,
"grad_norm": 1.0321085453033447,
"learning_rate": 2.320794889992903e-05,
"loss": 0.525,
"step": 2493
},
{
"epoch": 3.53,
"grad_norm": 1.0207672119140625,
"learning_rate": 2.313697657913414e-05,
"loss": 0.5035,
"step": 2494
},
{
"epoch": 3.53,
"grad_norm": 1.0441876649856567,
"learning_rate": 2.306600425833925e-05,
"loss": 0.541,
"step": 2495
},
{
"epoch": 3.54,
"grad_norm": 1.0213267803192139,
"learning_rate": 2.2995031937544358e-05,
"loss": 0.5041,
"step": 2496
},
{
"epoch": 3.54,
"grad_norm": 1.019863247871399,
"learning_rate": 2.2924059616749467e-05,
"loss": 0.5105,
"step": 2497
},
{
"epoch": 3.54,
"grad_norm": 0.9917488098144531,
"learning_rate": 2.285308729595458e-05,
"loss": 0.5081,
"step": 2498
},
{
"epoch": 3.54,
"grad_norm": 0.9887694716453552,
"learning_rate": 2.278211497515969e-05,
"loss": 0.4958,
"step": 2499
},
{
"epoch": 3.54,
"grad_norm": 1.0222865343093872,
"learning_rate": 2.27111426543648e-05,
"loss": 0.5191,
"step": 2500
}
],
"logging_steps": 1,
"max_steps": 2820,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"total_flos": 5.0634694852608e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}