| { |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 9.944281524926687, |
| "eval_steps": 500, |
| "global_step": 1700, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.005865102639296188, |
| "grad_norm": 36.14509413690048, |
| "learning_rate": 7.843137254901962e-07, |
| "loss": 2.5787, |
| "mean_token_accuracy": 0.4923335835337639, |
| "step": 1 |
| }, |
| { |
| "epoch": 0.011730205278592375, |
| "grad_norm": 36.4475794468042, |
| "learning_rate": 1.5686274509803923e-06, |
| "loss": 2.6539, |
| "mean_token_accuracy": 0.48734448477625847, |
| "step": 2 |
| }, |
| { |
| "epoch": 0.017595307917888565, |
| "grad_norm": 32.10454485992091, |
| "learning_rate": 2.3529411764705885e-06, |
| "loss": 2.543, |
| "mean_token_accuracy": 0.48166975751519203, |
| "step": 3 |
| }, |
| { |
| "epoch": 0.02346041055718475, |
| "grad_norm": 26.242796758548412, |
| "learning_rate": 3.1372549019607846e-06, |
| "loss": 2.4279, |
| "mean_token_accuracy": 0.5031268112361431, |
| "step": 4 |
| }, |
| { |
| "epoch": 0.02932551319648094, |
| "grad_norm": 15.599962593546184, |
| "learning_rate": 3.92156862745098e-06, |
| "loss": 2.1324, |
| "mean_token_accuracy": 0.5300704799592495, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.03519061583577713, |
| "grad_norm": 12.006705982946158, |
| "learning_rate": 4.705882352941177e-06, |
| "loss": 1.8898, |
| "mean_token_accuracy": 0.572486124932766, |
| "step": 6 |
| }, |
| { |
| "epoch": 0.04105571847507331, |
| "grad_norm": 11.765766521790566, |
| "learning_rate": 5.4901960784313735e-06, |
| "loss": 1.8913, |
| "mean_token_accuracy": 0.5782412365078926, |
| "step": 7 |
| }, |
| { |
| "epoch": 0.0469208211143695, |
| "grad_norm": 9.957672917685553, |
| "learning_rate": 6.274509803921569e-06, |
| "loss": 1.6377, |
| "mean_token_accuracy": 0.6252397820353508, |
| "step": 8 |
| }, |
| { |
| "epoch": 0.05278592375366569, |
| "grad_norm": 7.539104060500998, |
| "learning_rate": 7.058823529411766e-06, |
| "loss": 1.5152, |
| "mean_token_accuracy": 0.64116121083498, |
| "step": 9 |
| }, |
| { |
| "epoch": 0.05865102639296188, |
| "grad_norm": 6.311700758053688, |
| "learning_rate": 7.84313725490196e-06, |
| "loss": 1.4626, |
| "mean_token_accuracy": 0.6572283431887627, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.06451612903225806, |
| "grad_norm": 6.03423316811303, |
| "learning_rate": 8.627450980392157e-06, |
| "loss": 1.32, |
| "mean_token_accuracy": 0.6869097501039505, |
| "step": 11 |
| }, |
| { |
| "epoch": 0.07038123167155426, |
| "grad_norm": 5.8161864206350735, |
| "learning_rate": 9.411764705882354e-06, |
| "loss": 1.2741, |
| "mean_token_accuracy": 0.6884993687272072, |
| "step": 12 |
| }, |
| { |
| "epoch": 0.07624633431085044, |
| "grad_norm": 10.50308433461848, |
| "learning_rate": 1.0196078431372549e-05, |
| "loss": 1.2259, |
| "mean_token_accuracy": 0.6989131048321724, |
| "step": 13 |
| }, |
| { |
| "epoch": 0.08211143695014662, |
| "grad_norm": 5.059241917511266, |
| "learning_rate": 1.0980392156862747e-05, |
| "loss": 1.3513, |
| "mean_token_accuracy": 0.6826794818043709, |
| "step": 14 |
| }, |
| { |
| "epoch": 0.08797653958944282, |
| "grad_norm": 5.89298633359988, |
| "learning_rate": 1.1764705882352942e-05, |
| "loss": 1.3141, |
| "mean_token_accuracy": 0.6757656708359718, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.093841642228739, |
| "grad_norm": 5.168080959685619, |
| "learning_rate": 1.2549019607843138e-05, |
| "loss": 1.2261, |
| "mean_token_accuracy": 0.691883496940136, |
| "step": 16 |
| }, |
| { |
| "epoch": 0.09970674486803519, |
| "grad_norm": 4.484141065615124, |
| "learning_rate": 1.3333333333333333e-05, |
| "loss": 1.1443, |
| "mean_token_accuracy": 0.7193374708294868, |
| "step": 17 |
| }, |
| { |
| "epoch": 0.10557184750733138, |
| "grad_norm": 5.453207713810213, |
| "learning_rate": 1.4117647058823532e-05, |
| "loss": 1.1347, |
| "mean_token_accuracy": 0.7149711772799492, |
| "step": 18 |
| }, |
| { |
| "epoch": 0.11143695014662756, |
| "grad_norm": 4.536133715127149, |
| "learning_rate": 1.4901960784313726e-05, |
| "loss": 1.2998, |
| "mean_token_accuracy": 0.6797390431165695, |
| "step": 19 |
| }, |
| { |
| "epoch": 0.11730205278592376, |
| "grad_norm": 4.246131189412244, |
| "learning_rate": 1.568627450980392e-05, |
| "loss": 1.1751, |
| "mean_token_accuracy": 0.7038690000772476, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.12316715542521994, |
| "grad_norm": 4.073009596887261, |
| "learning_rate": 1.647058823529412e-05, |
| "loss": 1.1252, |
| "mean_token_accuracy": 0.7140218988060951, |
| "step": 21 |
| }, |
| { |
| "epoch": 0.12903225806451613, |
| "grad_norm": 3.699907899806749, |
| "learning_rate": 1.7254901960784314e-05, |
| "loss": 1.149, |
| "mean_token_accuracy": 0.714351512491703, |
| "step": 22 |
| }, |
| { |
| "epoch": 0.1348973607038123, |
| "grad_norm": 3.972868223544853, |
| "learning_rate": 1.8039215686274513e-05, |
| "loss": 1.0669, |
| "mean_token_accuracy": 0.7354029938578606, |
| "step": 23 |
| }, |
| { |
| "epoch": 0.14076246334310852, |
| "grad_norm": 3.7388993855253982, |
| "learning_rate": 1.8823529411764708e-05, |
| "loss": 1.0795, |
| "mean_token_accuracy": 0.7333876341581345, |
| "step": 24 |
| }, |
| { |
| "epoch": 0.1466275659824047, |
| "grad_norm": 4.046285000278155, |
| "learning_rate": 1.9607843137254903e-05, |
| "loss": 1.0917, |
| "mean_token_accuracy": 0.7187787368893623, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.15249266862170088, |
| "grad_norm": 4.194005650841938, |
| "learning_rate": 2.0392156862745097e-05, |
| "loss": 1.1695, |
| "mean_token_accuracy": 0.7088376209139824, |
| "step": 26 |
| }, |
| { |
| "epoch": 0.15835777126099707, |
| "grad_norm": 4.162135874639218, |
| "learning_rate": 2.1176470588235296e-05, |
| "loss": 1.0882, |
| "mean_token_accuracy": 0.7265072241425514, |
| "step": 27 |
| }, |
| { |
| "epoch": 0.16422287390029325, |
| "grad_norm": 3.6630230495700165, |
| "learning_rate": 2.1960784313725494e-05, |
| "loss": 0.9752, |
| "mean_token_accuracy": 0.7463881373405457, |
| "step": 28 |
| }, |
| { |
| "epoch": 0.17008797653958943, |
| "grad_norm": 4.192134812090267, |
| "learning_rate": 2.274509803921569e-05, |
| "loss": 1.0355, |
| "mean_token_accuracy": 0.7389096990227699, |
| "step": 29 |
| }, |
| { |
| "epoch": 0.17595307917888564, |
| "grad_norm": 4.153201939553374, |
| "learning_rate": 2.3529411764705884e-05, |
| "loss": 1.08, |
| "mean_token_accuracy": 0.723046787083149, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.18181818181818182, |
| "grad_norm": 4.158994764338232, |
| "learning_rate": 2.431372549019608e-05, |
| "loss": 1.2401, |
| "mean_token_accuracy": 0.7062453478574753, |
| "step": 31 |
| }, |
| { |
| "epoch": 0.187683284457478, |
| "grad_norm": 4.056181311918012, |
| "learning_rate": 2.5098039215686277e-05, |
| "loss": 0.967, |
| "mean_token_accuracy": 0.750753328204155, |
| "step": 32 |
| }, |
| { |
| "epoch": 0.1935483870967742, |
| "grad_norm": 4.093997127140846, |
| "learning_rate": 2.5882352941176475e-05, |
| "loss": 1.0725, |
| "mean_token_accuracy": 0.7313473895192146, |
| "step": 33 |
| }, |
| { |
| "epoch": 0.19941348973607037, |
| "grad_norm": 3.2432440297693557, |
| "learning_rate": 2.6666666666666667e-05, |
| "loss": 0.9912, |
| "mean_token_accuracy": 0.7401512935757637, |
| "step": 34 |
| }, |
| { |
| "epoch": 0.20527859237536658, |
| "grad_norm": 3.8928776327849253, |
| "learning_rate": 2.7450980392156865e-05, |
| "loss": 1.023, |
| "mean_token_accuracy": 0.7389869540929794, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.21114369501466276, |
| "grad_norm": 3.8559916286631495, |
| "learning_rate": 2.8235294117647063e-05, |
| "loss": 1.045, |
| "mean_token_accuracy": 0.7432608231902122, |
| "step": 36 |
| }, |
| { |
| "epoch": 0.21700879765395895, |
| "grad_norm": 3.420716935604122, |
| "learning_rate": 2.9019607843137258e-05, |
| "loss": 0.9842, |
| "mean_token_accuracy": 0.7604367211461067, |
| "step": 37 |
| }, |
| { |
| "epoch": 0.22287390029325513, |
| "grad_norm": 3.5100168737573747, |
| "learning_rate": 2.9803921568627453e-05, |
| "loss": 0.921, |
| "mean_token_accuracy": 0.7548864558339119, |
| "step": 38 |
| }, |
| { |
| "epoch": 0.2287390029325513, |
| "grad_norm": 3.7074432289739767, |
| "learning_rate": 3.0588235294117644e-05, |
| "loss": 1.1716, |
| "mean_token_accuracy": 0.7088168561458588, |
| "step": 39 |
| }, |
| { |
| "epoch": 0.23460410557184752, |
| "grad_norm": 3.3102650365950748, |
| "learning_rate": 3.137254901960784e-05, |
| "loss": 0.9172, |
| "mean_token_accuracy": 0.7633765339851379, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.2404692082111437, |
| "grad_norm": 3.149920605006581, |
| "learning_rate": 3.215686274509804e-05, |
| "loss": 0.9249, |
| "mean_token_accuracy": 0.7586075663566589, |
| "step": 41 |
| }, |
| { |
| "epoch": 0.24633431085043989, |
| "grad_norm": 3.2393738963150964, |
| "learning_rate": 3.294117647058824e-05, |
| "loss": 0.8995, |
| "mean_token_accuracy": 0.7706887423992157, |
| "step": 42 |
| }, |
| { |
| "epoch": 0.25219941348973607, |
| "grad_norm": 2.952349602555809, |
| "learning_rate": 3.372549019607844e-05, |
| "loss": 0.7726, |
| "mean_token_accuracy": 0.7944382950663567, |
| "step": 43 |
| }, |
| { |
| "epoch": 0.25806451612903225, |
| "grad_norm": 3.252665163092252, |
| "learning_rate": 3.450980392156863e-05, |
| "loss": 0.9039, |
| "mean_token_accuracy": 0.7575281262397766, |
| "step": 44 |
| }, |
| { |
| "epoch": 0.26392961876832843, |
| "grad_norm": 3.3763731210881596, |
| "learning_rate": 3.529411764705883e-05, |
| "loss": 0.8264, |
| "mean_token_accuracy": 0.7781133502721786, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.2697947214076246, |
| "grad_norm": 3.30879019930114, |
| "learning_rate": 3.6078431372549025e-05, |
| "loss": 0.9723, |
| "mean_token_accuracy": 0.7479118257761002, |
| "step": 46 |
| }, |
| { |
| "epoch": 0.2756598240469208, |
| "grad_norm": 3.464519745062525, |
| "learning_rate": 3.686274509803922e-05, |
| "loss": 0.9331, |
| "mean_token_accuracy": 0.7574068754911423, |
| "step": 47 |
| }, |
| { |
| "epoch": 0.28152492668621704, |
| "grad_norm": 3.1553239901431693, |
| "learning_rate": 3.7647058823529415e-05, |
| "loss": 0.8816, |
| "mean_token_accuracy": 0.7704252302646637, |
| "step": 48 |
| }, |
| { |
| "epoch": 0.2873900293255132, |
| "grad_norm": 3.346562743926702, |
| "learning_rate": 3.8431372549019614e-05, |
| "loss": 0.8737, |
| "mean_token_accuracy": 0.7910923138260841, |
| "step": 49 |
| }, |
| { |
| "epoch": 0.2932551319648094, |
| "grad_norm": 3.3482663437109106, |
| "learning_rate": 3.9215686274509805e-05, |
| "loss": 1.0582, |
| "mean_token_accuracy": 0.7400793433189392, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.2991202346041056, |
| "grad_norm": 2.808453690268802, |
| "learning_rate": 4e-05, |
| "loss": 0.8091, |
| "mean_token_accuracy": 0.7851236239075661, |
| "step": 51 |
| }, |
| { |
| "epoch": 0.30498533724340177, |
| "grad_norm": 3.3563707690930378, |
| "learning_rate": 3.999996733363487e-05, |
| "loss": 0.9554, |
| "mean_token_accuracy": 0.759026862680912, |
| "step": 52 |
| }, |
| { |
| "epoch": 0.31085043988269795, |
| "grad_norm": 3.177868427473436, |
| "learning_rate": 3.9999869334658026e-05, |
| "loss": 0.8829, |
| "mean_token_accuracy": 0.7689605951309204, |
| "step": 53 |
| }, |
| { |
| "epoch": 0.31671554252199413, |
| "grad_norm": 3.155245803816592, |
| "learning_rate": 3.9999706003425177e-05, |
| "loss": 0.9032, |
| "mean_token_accuracy": 0.7667737677693367, |
| "step": 54 |
| }, |
| { |
| "epoch": 0.3225806451612903, |
| "grad_norm": 3.178614863392828, |
| "learning_rate": 3.999947734052915e-05, |
| "loss": 1.0082, |
| "mean_token_accuracy": 0.746660441160202, |
| "step": 55 |
| }, |
| { |
| "epoch": 0.3284457478005865, |
| "grad_norm": 2.8354174033280786, |
| "learning_rate": 3.999918334679989e-05, |
| "loss": 0.9165, |
| "mean_token_accuracy": 0.7646413072943687, |
| "step": 56 |
| }, |
| { |
| "epoch": 0.3343108504398827, |
| "grad_norm": 3.050044566712993, |
| "learning_rate": 3.999882402330448e-05, |
| "loss": 0.8559, |
| "mean_token_accuracy": 0.7663176953792572, |
| "step": 57 |
| }, |
| { |
| "epoch": 0.34017595307917886, |
| "grad_norm": 2.766027156435859, |
| "learning_rate": 3.999839937134712e-05, |
| "loss": 0.8021, |
| "mean_token_accuracy": 0.7877992242574692, |
| "step": 58 |
| }, |
| { |
| "epoch": 0.3460410557184751, |
| "grad_norm": 3.1089150664448253, |
| "learning_rate": 3.999790939246912e-05, |
| "loss": 1.0441, |
| "mean_token_accuracy": 0.7402208596467972, |
| "step": 59 |
| }, |
| { |
| "epoch": 0.3519061583577713, |
| "grad_norm": 3.070052989823742, |
| "learning_rate": 3.999735408844892e-05, |
| "loss": 0.8303, |
| "mean_token_accuracy": 0.7838614583015442, |
| "step": 60 |
| }, |
| { |
| "epoch": 0.35777126099706746, |
| "grad_norm": 2.996347966135584, |
| "learning_rate": 3.999673346130203e-05, |
| "loss": 0.9312, |
| "mean_token_accuracy": 0.7652290537953377, |
| "step": 61 |
| }, |
| { |
| "epoch": 0.36363636363636365, |
| "grad_norm": 2.690721520008225, |
| "learning_rate": 3.999604751328109e-05, |
| "loss": 0.7596, |
| "mean_token_accuracy": 0.8071479573845863, |
| "step": 62 |
| }, |
| { |
| "epoch": 0.36950146627565983, |
| "grad_norm": 2.773174553879102, |
| "learning_rate": 3.999529624687581e-05, |
| "loss": 0.7103, |
| "mean_token_accuracy": 0.8112666308879852, |
| "step": 63 |
| }, |
| { |
| "epoch": 0.375366568914956, |
| "grad_norm": 2.93779834083201, |
| "learning_rate": 3.999447966481298e-05, |
| "loss": 0.8697, |
| "mean_token_accuracy": 0.7921107411384583, |
| "step": 64 |
| }, |
| { |
| "epoch": 0.3812316715542522, |
| "grad_norm": 3.1672195550770197, |
| "learning_rate": 3.999359777005647e-05, |
| "loss": 0.9436, |
| "mean_token_accuracy": 0.7524725720286369, |
| "step": 65 |
| }, |
| { |
| "epoch": 0.3870967741935484, |
| "grad_norm": 2.8351133608014525, |
| "learning_rate": 3.999265056580719e-05, |
| "loss": 0.7133, |
| "mean_token_accuracy": 0.8027519956231117, |
| "step": 66 |
| }, |
| { |
| "epoch": 0.39296187683284456, |
| "grad_norm": 3.3754028291704237, |
| "learning_rate": 3.999163805550313e-05, |
| "loss": 1.0152, |
| "mean_token_accuracy": 0.763345830142498, |
| "step": 67 |
| }, |
| { |
| "epoch": 0.39882697947214074, |
| "grad_norm": 3.0381916656684504, |
| "learning_rate": 3.9990560242819274e-05, |
| "loss": 0.8966, |
| "mean_token_accuracy": 0.7718857899308205, |
| "step": 68 |
| }, |
| { |
| "epoch": 0.4046920821114369, |
| "grad_norm": 2.5365722428064412, |
| "learning_rate": 3.9989417131667647e-05, |
| "loss": 0.7638, |
| "mean_token_accuracy": 0.8163110539317131, |
| "step": 69 |
| }, |
| { |
| "epoch": 0.41055718475073316, |
| "grad_norm": 2.7915162380323855, |
| "learning_rate": 3.9988208726197293e-05, |
| "loss": 0.7726, |
| "mean_token_accuracy": 0.7922361120581627, |
| "step": 70 |
| }, |
| { |
| "epoch": 0.41642228739002934, |
| "grad_norm": 2.978173227270339, |
| "learning_rate": 3.998693503079423e-05, |
| "loss": 0.927, |
| "mean_token_accuracy": 0.7751563489437103, |
| "step": 71 |
| }, |
| { |
| "epoch": 0.4222873900293255, |
| "grad_norm": 2.8448079415335235, |
| "learning_rate": 3.998559605008146e-05, |
| "loss": 0.801, |
| "mean_token_accuracy": 0.779315672814846, |
| "step": 72 |
| }, |
| { |
| "epoch": 0.4281524926686217, |
| "grad_norm": 2.8387570831780815, |
| "learning_rate": 3.9984191788918936e-05, |
| "loss": 0.8389, |
| "mean_token_accuracy": 0.7829952985048294, |
| "step": 73 |
| }, |
| { |
| "epoch": 0.4340175953079179, |
| "grad_norm": 2.5878507908913915, |
| "learning_rate": 3.998272225240356e-05, |
| "loss": 0.9174, |
| "mean_token_accuracy": 0.7716861665248871, |
| "step": 74 |
| }, |
| { |
| "epoch": 0.4398826979472141, |
| "grad_norm": 2.6510951948539994, |
| "learning_rate": 3.9981187445869165e-05, |
| "loss": 0.7813, |
| "mean_token_accuracy": 0.8107101172208786, |
| "step": 75 |
| }, |
| { |
| "epoch": 0.44574780058651026, |
| "grad_norm": 2.697548713971357, |
| "learning_rate": 3.9979587374886466e-05, |
| "loss": 0.8972, |
| "mean_token_accuracy": 0.7646167501807213, |
| "step": 76 |
| }, |
| { |
| "epoch": 0.45161290322580644, |
| "grad_norm": 2.8139069796203504, |
| "learning_rate": 3.997792204526309e-05, |
| "loss": 0.8033, |
| "mean_token_accuracy": 0.7897998914122581, |
| "step": 77 |
| }, |
| { |
| "epoch": 0.4574780058651026, |
| "grad_norm": 2.3436223362656476, |
| "learning_rate": 3.99761914630435e-05, |
| "loss": 0.7414, |
| "mean_token_accuracy": 0.7999845147132874, |
| "step": 78 |
| }, |
| { |
| "epoch": 0.4633431085043988, |
| "grad_norm": 2.594588586928223, |
| "learning_rate": 3.997439563450901e-05, |
| "loss": 0.7127, |
| "mean_token_accuracy": 0.8027607202529907, |
| "step": 79 |
| }, |
| { |
| "epoch": 0.46920821114369504, |
| "grad_norm": 2.8704349406315517, |
| "learning_rate": 3.997253456617775e-05, |
| "loss": 0.7686, |
| "mean_token_accuracy": 0.8007281050086021, |
| "step": 80 |
| }, |
| { |
| "epoch": 0.4750733137829912, |
| "grad_norm": 2.5510137739446686, |
| "learning_rate": 3.997060826480465e-05, |
| "loss": 0.6935, |
| "mean_token_accuracy": 0.8144687339663506, |
| "step": 81 |
| }, |
| { |
| "epoch": 0.4809384164222874, |
| "grad_norm": 2.360869808267721, |
| "learning_rate": 3.9968616737381414e-05, |
| "loss": 0.7855, |
| "mean_token_accuracy": 0.8035428002476692, |
| "step": 82 |
| }, |
| { |
| "epoch": 0.4868035190615836, |
| "grad_norm": 2.2447686355133514, |
| "learning_rate": 3.996655999113647e-05, |
| "loss": 0.6925, |
| "mean_token_accuracy": 0.8235335797071457, |
| "step": 83 |
| }, |
| { |
| "epoch": 0.49266862170087977, |
| "grad_norm": 2.2547861026647382, |
| "learning_rate": 3.9964438033534994e-05, |
| "loss": 0.5884, |
| "mean_token_accuracy": 0.8370934575796127, |
| "step": 84 |
| }, |
| { |
| "epoch": 0.49853372434017595, |
| "grad_norm": 2.309713135086869, |
| "learning_rate": 3.996225087227881e-05, |
| "loss": 0.736, |
| "mean_token_accuracy": 0.8138050213456154, |
| "step": 85 |
| }, |
| { |
| "epoch": 0.5043988269794721, |
| "grad_norm": 2.344634268841188, |
| "learning_rate": 3.995999851530645e-05, |
| "loss": 0.6836, |
| "mean_token_accuracy": 0.8341879695653915, |
| "step": 86 |
| }, |
| { |
| "epoch": 0.5102639296187683, |
| "grad_norm": 2.5276817825702507, |
| "learning_rate": 3.995768097079305e-05, |
| "loss": 0.7201, |
| "mean_token_accuracy": 0.8180172145366669, |
| "step": 87 |
| }, |
| { |
| "epoch": 0.5161290322580645, |
| "grad_norm": 2.930450810837515, |
| "learning_rate": 3.9955298247150365e-05, |
| "loss": 0.8532, |
| "mean_token_accuracy": 0.7764740958809853, |
| "step": 88 |
| }, |
| { |
| "epoch": 0.5219941348973607, |
| "grad_norm": 2.635872197106337, |
| "learning_rate": 3.9952850353026715e-05, |
| "loss": 0.7202, |
| "mean_token_accuracy": 0.7964513823390007, |
| "step": 89 |
| }, |
| { |
| "epoch": 0.5278592375366569, |
| "grad_norm": 2.6560892710983475, |
| "learning_rate": 3.9950337297306976e-05, |
| "loss": 0.7718, |
| "mean_token_accuracy": 0.8056656494736671, |
| "step": 90 |
| }, |
| { |
| "epoch": 0.533724340175953, |
| "grad_norm": 2.9785297087153233, |
| "learning_rate": 3.994775908911251e-05, |
| "loss": 0.8443, |
| "mean_token_accuracy": 0.7823826372623444, |
| "step": 91 |
| }, |
| { |
| "epoch": 0.5395894428152492, |
| "grad_norm": 2.6848005659586094, |
| "learning_rate": 3.9945115737801183e-05, |
| "loss": 0.6993, |
| "mean_token_accuracy": 0.8107479214668274, |
| "step": 92 |
| }, |
| { |
| "epoch": 0.5454545454545454, |
| "grad_norm": 2.6298393012226247, |
| "learning_rate": 3.99424072529673e-05, |
| "loss": 0.8409, |
| "mean_token_accuracy": 0.7889999225735664, |
| "step": 93 |
| }, |
| { |
| "epoch": 0.5513196480938416, |
| "grad_norm": 2.579937157375281, |
| "learning_rate": 3.993963364444155e-05, |
| "loss": 0.7234, |
| "mean_token_accuracy": 0.8055694922804832, |
| "step": 94 |
| }, |
| { |
| "epoch": 0.5571847507331378, |
| "grad_norm": 2.955654530084866, |
| "learning_rate": 3.9936794922291015e-05, |
| "loss": 0.8405, |
| "mean_token_accuracy": 0.7774112895131111, |
| "step": 95 |
| }, |
| { |
| "epoch": 0.5630498533724341, |
| "grad_norm": 3.0436689004178534, |
| "learning_rate": 3.993389109681912e-05, |
| "loss": 0.7653, |
| "mean_token_accuracy": 0.79569511115551, |
| "step": 96 |
| }, |
| { |
| "epoch": 0.5689149560117303, |
| "grad_norm": 2.817659396803083, |
| "learning_rate": 3.993092217856557e-05, |
| "loss": 0.7067, |
| "mean_token_accuracy": 0.8145394548773766, |
| "step": 97 |
| }, |
| { |
| "epoch": 0.5747800586510264, |
| "grad_norm": 2.7515378637269468, |
| "learning_rate": 3.9927888178306346e-05, |
| "loss": 0.8013, |
| "mean_token_accuracy": 0.791895680129528, |
| "step": 98 |
| }, |
| { |
| "epoch": 0.5806451612903226, |
| "grad_norm": 2.907537751643136, |
| "learning_rate": 3.992478910705364e-05, |
| "loss": 0.8031, |
| "mean_token_accuracy": 0.7878992408514023, |
| "step": 99 |
| }, |
| { |
| "epoch": 0.5865102639296188, |
| "grad_norm": 2.979279035766898, |
| "learning_rate": 3.992162497605583e-05, |
| "loss": 0.6806, |
| "mean_token_accuracy": 0.8274494782090187, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.592375366568915, |
| "grad_norm": 2.4353950711528043, |
| "learning_rate": 3.991839579679742e-05, |
| "loss": 0.7353, |
| "mean_token_accuracy": 0.806957870721817, |
| "step": 101 |
| }, |
| { |
| "epoch": 0.5982404692082112, |
| "grad_norm": 2.4572327476853957, |
| "learning_rate": 3.991510158099905e-05, |
| "loss": 0.5704, |
| "mean_token_accuracy": 0.846699096262455, |
| "step": 102 |
| }, |
| { |
| "epoch": 0.6041055718475073, |
| "grad_norm": 2.3286640449972222, |
| "learning_rate": 3.991174234061738e-05, |
| "loss": 0.6006, |
| "mean_token_accuracy": 0.8506223112344742, |
| "step": 103 |
| }, |
| { |
| "epoch": 0.6099706744868035, |
| "grad_norm": 2.631876593176074, |
| "learning_rate": 3.9908318087845104e-05, |
| "loss": 0.7379, |
| "mean_token_accuracy": 0.8100381121039391, |
| "step": 104 |
| }, |
| { |
| "epoch": 0.6158357771260997, |
| "grad_norm": 2.2789202678122584, |
| "learning_rate": 3.990482883511086e-05, |
| "loss": 0.5341, |
| "mean_token_accuracy": 0.8537792935967445, |
| "step": 105 |
| }, |
| { |
| "epoch": 0.6217008797653959, |
| "grad_norm": 2.120418551508169, |
| "learning_rate": 3.990127459507924e-05, |
| "loss": 0.6053, |
| "mean_token_accuracy": 0.8266285732388496, |
| "step": 106 |
| }, |
| { |
| "epoch": 0.6275659824046921, |
| "grad_norm": 2.364686651816796, |
| "learning_rate": 3.98976553806507e-05, |
| "loss": 0.5868, |
| "mean_token_accuracy": 0.840075246989727, |
| "step": 107 |
| }, |
| { |
| "epoch": 0.6334310850439883, |
| "grad_norm": 2.451948661442587, |
| "learning_rate": 3.989397120496152e-05, |
| "loss": 0.5193, |
| "mean_token_accuracy": 0.8647155538201332, |
| "step": 108 |
| }, |
| { |
| "epoch": 0.6392961876832844, |
| "grad_norm": 2.355709310543082, |
| "learning_rate": 3.989022208138377e-05, |
| "loss": 0.5675, |
| "mean_token_accuracy": 0.8488794639706612, |
| "step": 109 |
| }, |
| { |
| "epoch": 0.6451612903225806, |
| "grad_norm": 3.02749013010203, |
| "learning_rate": 3.9886408023525256e-05, |
| "loss": 0.7619, |
| "mean_token_accuracy": 0.8114860579371452, |
| "step": 110 |
| }, |
| { |
| "epoch": 0.6510263929618768, |
| "grad_norm": 2.8000919382354432, |
| "learning_rate": 3.9882529045229475e-05, |
| "loss": 0.7982, |
| "mean_token_accuracy": 0.7895509079098701, |
| "step": 111 |
| }, |
| { |
| "epoch": 0.656891495601173, |
| "grad_norm": 3.219052282134806, |
| "learning_rate": 3.987858516057554e-05, |
| "loss": 0.5985, |
| "mean_token_accuracy": 0.8427421972155571, |
| "step": 112 |
| }, |
| { |
| "epoch": 0.6627565982404692, |
| "grad_norm": 2.433256738390738, |
| "learning_rate": 3.9874576383878165e-05, |
| "loss": 0.6474, |
| "mean_token_accuracy": 0.8321207016706467, |
| "step": 113 |
| }, |
| { |
| "epoch": 0.6686217008797654, |
| "grad_norm": 2.3493562113379274, |
| "learning_rate": 3.9870502729687594e-05, |
| "loss": 0.6348, |
| "mean_token_accuracy": 0.8373970687389374, |
| "step": 114 |
| }, |
| { |
| "epoch": 0.6744868035190615, |
| "grad_norm": 2.331602788927495, |
| "learning_rate": 3.986636421278954e-05, |
| "loss": 0.6854, |
| "mean_token_accuracy": 0.8225691393017769, |
| "step": 115 |
| }, |
| { |
| "epoch": 0.6803519061583577, |
| "grad_norm": 2.142107119555709, |
| "learning_rate": 3.986216084820515e-05, |
| "loss": 0.5011, |
| "mean_token_accuracy": 0.8588138148188591, |
| "step": 116 |
| }, |
| { |
| "epoch": 0.6862170087976539, |
| "grad_norm": 2.419032052988893, |
| "learning_rate": 3.985789265119095e-05, |
| "loss": 0.5726, |
| "mean_token_accuracy": 0.8352588415145874, |
| "step": 117 |
| }, |
| { |
| "epoch": 0.6920821114369502, |
| "grad_norm": 2.09085744769477, |
| "learning_rate": 3.985355963723875e-05, |
| "loss": 0.4849, |
| "mean_token_accuracy": 0.8706546425819397, |
| "step": 118 |
| }, |
| { |
| "epoch": 0.6979472140762464, |
| "grad_norm": 2.1422570833131664, |
| "learning_rate": 3.9849161822075655e-05, |
| "loss": 0.5376, |
| "mean_token_accuracy": 0.8550170734524727, |
| "step": 119 |
| }, |
| { |
| "epoch": 0.7038123167155426, |
| "grad_norm": 2.388752967701632, |
| "learning_rate": 3.984469922166396e-05, |
| "loss": 0.6278, |
| "mean_token_accuracy": 0.842079646885395, |
| "step": 120 |
| }, |
| { |
| "epoch": 0.7096774193548387, |
| "grad_norm": 2.518159286077506, |
| "learning_rate": 3.984017185220109e-05, |
| "loss": 0.7772, |
| "mean_token_accuracy": 0.8071364387869835, |
| "step": 121 |
| }, |
| { |
| "epoch": 0.7155425219941349, |
| "grad_norm": 2.2531929182116746, |
| "learning_rate": 3.9835579730119576e-05, |
| "loss": 0.6705, |
| "mean_token_accuracy": 0.8298157975077629, |
| "step": 122 |
| }, |
| { |
| "epoch": 0.7214076246334311, |
| "grad_norm": 2.207859071920299, |
| "learning_rate": 3.9830922872086974e-05, |
| "loss": 0.6655, |
| "mean_token_accuracy": 0.8434372246265411, |
| "step": 123 |
| }, |
| { |
| "epoch": 0.7272727272727273, |
| "grad_norm": 2.2972010787323818, |
| "learning_rate": 3.9826201295005784e-05, |
| "loss": 0.7564, |
| "mean_token_accuracy": 0.8130914643406868, |
| "step": 124 |
| }, |
| { |
| "epoch": 0.7331378299120235, |
| "grad_norm": 2.6764034052539807, |
| "learning_rate": 3.982141501601343e-05, |
| "loss": 0.7236, |
| "mean_token_accuracy": 0.8133985474705696, |
| "step": 125 |
| }, |
| { |
| "epoch": 0.7390029325513197, |
| "grad_norm": 2.2683967522097817, |
| "learning_rate": 3.9816564052482164e-05, |
| "loss": 0.646, |
| "mean_token_accuracy": 0.825585164129734, |
| "step": 126 |
| }, |
| { |
| "epoch": 0.7448680351906158, |
| "grad_norm": 2.2683787938151263, |
| "learning_rate": 3.981164842201904e-05, |
| "loss": 0.6917, |
| "mean_token_accuracy": 0.8327226713299751, |
| "step": 127 |
| }, |
| { |
| "epoch": 0.750733137829912, |
| "grad_norm": 2.3009219430061982, |
| "learning_rate": 3.9806668142465804e-05, |
| "loss": 0.7333, |
| "mean_token_accuracy": 0.8195018395781517, |
| "step": 128 |
| }, |
| { |
| "epoch": 0.7565982404692082, |
| "grad_norm": 2.045832816656146, |
| "learning_rate": 3.9801623231898856e-05, |
| "loss": 0.5323, |
| "mean_token_accuracy": 0.8589048609137535, |
| "step": 129 |
| }, |
| { |
| "epoch": 0.7624633431085044, |
| "grad_norm": 2.0363179289257833, |
| "learning_rate": 3.9796513708629186e-05, |
| "loss": 0.5484, |
| "mean_token_accuracy": 0.8502952381968498, |
| "step": 130 |
| }, |
| { |
| "epoch": 0.7683284457478006, |
| "grad_norm": 2.1787352594889122, |
| "learning_rate": 3.979133959120229e-05, |
| "loss": 0.535, |
| "mean_token_accuracy": 0.857828326523304, |
| "step": 131 |
| }, |
| { |
| "epoch": 0.7741935483870968, |
| "grad_norm": 2.077917146910636, |
| "learning_rate": 3.9786100898398145e-05, |
| "loss": 0.5617, |
| "mean_token_accuracy": 0.8493303209543228, |
| "step": 132 |
| }, |
| { |
| "epoch": 0.7800586510263929, |
| "grad_norm": 2.097361535539421, |
| "learning_rate": 3.9780797649231085e-05, |
| "loss": 0.5901, |
| "mean_token_accuracy": 0.8467446342110634, |
| "step": 133 |
| }, |
| { |
| "epoch": 0.7859237536656891, |
| "grad_norm": 2.3198793806949154, |
| "learning_rate": 3.9775429862949745e-05, |
| "loss": 0.6641, |
| "mean_token_accuracy": 0.8338883817195892, |
| "step": 134 |
| }, |
| { |
| "epoch": 0.7917888563049853, |
| "grad_norm": 2.260193065434171, |
| "learning_rate": 3.976999755903704e-05, |
| "loss": 0.6673, |
| "mean_token_accuracy": 0.827575221657753, |
| "step": 135 |
| }, |
| { |
| "epoch": 0.7976539589442815, |
| "grad_norm": 2.1386712402376036, |
| "learning_rate": 3.976450075721003e-05, |
| "loss": 0.5606, |
| "mean_token_accuracy": 0.8533760160207748, |
| "step": 136 |
| }, |
| { |
| "epoch": 0.8035190615835777, |
| "grad_norm": 2.145352123969171, |
| "learning_rate": 3.975893947741989e-05, |
| "loss": 0.5056, |
| "mean_token_accuracy": 0.8680669069290161, |
| "step": 137 |
| }, |
| { |
| "epoch": 0.8093841642228738, |
| "grad_norm": 2.1768056211172295, |
| "learning_rate": 3.9753313739851824e-05, |
| "loss": 0.6832, |
| "mean_token_accuracy": 0.8207377269864082, |
| "step": 138 |
| }, |
| { |
| "epoch": 0.8152492668621701, |
| "grad_norm": 2.3681293407151314, |
| "learning_rate": 3.974762356492498e-05, |
| "loss": 0.7659, |
| "mean_token_accuracy": 0.8131817951798439, |
| "step": 139 |
| }, |
| { |
| "epoch": 0.8211143695014663, |
| "grad_norm": 2.379227773317173, |
| "learning_rate": 3.974186897329239e-05, |
| "loss": 0.5398, |
| "mean_token_accuracy": 0.8676523044705391, |
| "step": 140 |
| }, |
| { |
| "epoch": 0.8269794721407625, |
| "grad_norm": 2.1423522624381, |
| "learning_rate": 3.97360499858409e-05, |
| "loss": 0.5587, |
| "mean_token_accuracy": 0.8550106212496758, |
| "step": 141 |
| }, |
| { |
| "epoch": 0.8328445747800587, |
| "grad_norm": 2.3739550490799304, |
| "learning_rate": 3.9730166623691096e-05, |
| "loss": 0.7078, |
| "mean_token_accuracy": 0.8150490075349808, |
| "step": 142 |
| }, |
| { |
| "epoch": 0.8387096774193549, |
| "grad_norm": 2.280723224876568, |
| "learning_rate": 3.9724218908197194e-05, |
| "loss": 0.5059, |
| "mean_token_accuracy": 0.853252723813057, |
| "step": 143 |
| }, |
| { |
| "epoch": 0.844574780058651, |
| "grad_norm": 2.7177341329057483, |
| "learning_rate": 3.971820686094701e-05, |
| "loss": 0.7745, |
| "mean_token_accuracy": 0.8030019998550415, |
| "step": 144 |
| }, |
| { |
| "epoch": 0.8504398826979472, |
| "grad_norm": 2.2866660183054055, |
| "learning_rate": 3.971213050376183e-05, |
| "loss": 0.6888, |
| "mean_token_accuracy": 0.8160409331321716, |
| "step": 145 |
| }, |
| { |
| "epoch": 0.8563049853372434, |
| "grad_norm": 1.7729411173268295, |
| "learning_rate": 3.9705989858696387e-05, |
| "loss": 0.5162, |
| "mean_token_accuracy": 0.8643370196223259, |
| "step": 146 |
| }, |
| { |
| "epoch": 0.8621700879765396, |
| "grad_norm": 1.9544676092787794, |
| "learning_rate": 3.969978494803876e-05, |
| "loss": 0.5033, |
| "mean_token_accuracy": 0.8608671575784683, |
| "step": 147 |
| }, |
| { |
| "epoch": 0.8680351906158358, |
| "grad_norm": 2.0416284223126797, |
| "learning_rate": 3.969351579431024e-05, |
| "loss": 0.5282, |
| "mean_token_accuracy": 0.8565196245908737, |
| "step": 148 |
| }, |
| { |
| "epoch": 0.873900293255132, |
| "grad_norm": 1.9202190012827038, |
| "learning_rate": 3.968718242026533e-05, |
| "loss": 0.4962, |
| "mean_token_accuracy": 0.8656453415751457, |
| "step": 149 |
| }, |
| { |
| "epoch": 0.8797653958944281, |
| "grad_norm": 1.7711919486309162, |
| "learning_rate": 3.968078484889163e-05, |
| "loss": 0.3981, |
| "mean_token_accuracy": 0.8840658068656921, |
| "step": 150 |
| }, |
| { |
| "epoch": 0.8856304985337243, |
| "grad_norm": 2.103353063920332, |
| "learning_rate": 3.9674323103409736e-05, |
| "loss": 0.5512, |
| "mean_token_accuracy": 0.8531129956245422, |
| "step": 151 |
| }, |
| { |
| "epoch": 0.8914956011730205, |
| "grad_norm": 2.441157781778918, |
| "learning_rate": 3.966779720727317e-05, |
| "loss": 0.6903, |
| "mean_token_accuracy": 0.8275642022490501, |
| "step": 152 |
| }, |
| { |
| "epoch": 0.8973607038123167, |
| "grad_norm": 2.1644819233748693, |
| "learning_rate": 3.9661207184168305e-05, |
| "loss": 0.535, |
| "mean_token_accuracy": 0.8525630459189415, |
| "step": 153 |
| }, |
| { |
| "epoch": 0.9032258064516129, |
| "grad_norm": 2.2864455907051577, |
| "learning_rate": 3.9654553058014265e-05, |
| "loss": 0.6607, |
| "mean_token_accuracy": 0.8330980539321899, |
| "step": 154 |
| }, |
| { |
| "epoch": 0.9090909090909091, |
| "grad_norm": 2.0226335451161654, |
| "learning_rate": 3.9647834852962825e-05, |
| "loss": 0.542, |
| "mean_token_accuracy": 0.8625759854912758, |
| "step": 155 |
| }, |
| { |
| "epoch": 0.9149560117302052, |
| "grad_norm": 2.420244762324832, |
| "learning_rate": 3.964105259339838e-05, |
| "loss": 0.7424, |
| "mean_token_accuracy": 0.8057239204645157, |
| "step": 156 |
| }, |
| { |
| "epoch": 0.9208211143695014, |
| "grad_norm": 1.8979120793832391, |
| "learning_rate": 3.9634206303937773e-05, |
| "loss": 0.4512, |
| "mean_token_accuracy": 0.875034749507904, |
| "step": 157 |
| }, |
| { |
| "epoch": 0.9266862170087976, |
| "grad_norm": 1.7284509709305393, |
| "learning_rate": 3.962729600943028e-05, |
| "loss": 0.448, |
| "mean_token_accuracy": 0.8827922642230988, |
| "step": 158 |
| }, |
| { |
| "epoch": 0.9325513196480938, |
| "grad_norm": 2.3569337927624945, |
| "learning_rate": 3.962032173495748e-05, |
| "loss": 0.4919, |
| "mean_token_accuracy": 0.8744383007287979, |
| "step": 159 |
| }, |
| { |
| "epoch": 0.9384164222873901, |
| "grad_norm": 1.9017225299582532, |
| "learning_rate": 3.961328350583316e-05, |
| "loss": 0.4726, |
| "mean_token_accuracy": 0.871865801513195, |
| "step": 160 |
| }, |
| { |
| "epoch": 0.9442815249266863, |
| "grad_norm": 2.1065821737511614, |
| "learning_rate": 3.960618134760327e-05, |
| "loss": 0.5991, |
| "mean_token_accuracy": 0.8471841290593147, |
| "step": 161 |
| }, |
| { |
| "epoch": 0.9501466275659824, |
| "grad_norm": 1.7468566820440716, |
| "learning_rate": 3.959901528604575e-05, |
| "loss": 0.3703, |
| "mean_token_accuracy": 0.8899563401937485, |
| "step": 162 |
| }, |
| { |
| "epoch": 0.9560117302052786, |
| "grad_norm": 2.0026819483535525, |
| "learning_rate": 3.959178534717053e-05, |
| "loss": 0.6124, |
| "mean_token_accuracy": 0.844733901321888, |
| "step": 163 |
| }, |
| { |
| "epoch": 0.9618768328445748, |
| "grad_norm": 1.8429810966574525, |
| "learning_rate": 3.9584491557219366e-05, |
| "loss": 0.6114, |
| "mean_token_accuracy": 0.8512536585330963, |
| "step": 164 |
| }, |
| { |
| "epoch": 0.967741935483871, |
| "grad_norm": 1.9068504240620572, |
| "learning_rate": 3.957713394266576e-05, |
| "loss": 0.5225, |
| "mean_token_accuracy": 0.8591367825865746, |
| "step": 165 |
| }, |
| { |
| "epoch": 0.9736070381231672, |
| "grad_norm": 2.03904846981795, |
| "learning_rate": 3.956971253021489e-05, |
| "loss": 0.4543, |
| "mean_token_accuracy": 0.8774362131953239, |
| "step": 166 |
| }, |
| { |
| "epoch": 0.9794721407624634, |
| "grad_norm": 1.9730559274041801, |
| "learning_rate": 3.956222734680348e-05, |
| "loss": 0.5391, |
| "mean_token_accuracy": 0.8570215106010437, |
| "step": 167 |
| }, |
| { |
| "epoch": 0.9853372434017595, |
| "grad_norm": 1.9611601962690912, |
| "learning_rate": 3.955467841959972e-05, |
| "loss": 0.5588, |
| "mean_token_accuracy": 0.8605179488658905, |
| "step": 168 |
| }, |
| { |
| "epoch": 0.9912023460410557, |
| "grad_norm": 1.8542913981304092, |
| "learning_rate": 3.954706577600318e-05, |
| "loss": 0.5001, |
| "mean_token_accuracy": 0.8608422949910164, |
| "step": 169 |
| }, |
| { |
| "epoch": 0.9970674486803519, |
| "grad_norm": 2.0514447581437483, |
| "learning_rate": 3.953938944364467e-05, |
| "loss": 0.6395, |
| "mean_token_accuracy": 0.8432887569069862, |
| "step": 170 |
| }, |
| { |
| "epoch": 1.0, |
| "grad_norm": 2.0514447581437483, |
| "learning_rate": 3.953164945038618e-05, |
| "loss": 0.56, |
| "mean_token_accuracy": 0.8685450553894043, |
| "step": 171 |
| }, |
| { |
| "epoch": 1.0058651026392962, |
| "grad_norm": 2.798511260901564, |
| "learning_rate": 3.952384582432076e-05, |
| "loss": 0.3523, |
| "mean_token_accuracy": 0.8961983993649483, |
| "step": 172 |
| }, |
| { |
| "epoch": 1.0117302052785924, |
| "grad_norm": 1.6143327247401649, |
| "learning_rate": 3.9515978593772426e-05, |
| "loss": 0.2906, |
| "mean_token_accuracy": 0.9175504371523857, |
| "step": 173 |
| }, |
| { |
| "epoch": 1.0175953079178885, |
| "grad_norm": 1.5392085231630535, |
| "learning_rate": 3.9508047787296034e-05, |
| "loss": 0.2327, |
| "mean_token_accuracy": 0.9279068484902382, |
| "step": 174 |
| }, |
| { |
| "epoch": 1.0234604105571847, |
| "grad_norm": 1.4196610216082999, |
| "learning_rate": 3.9500053433677226e-05, |
| "loss": 0.238, |
| "mean_token_accuracy": 0.9234108552336693, |
| "step": 175 |
| }, |
| { |
| "epoch": 1.029325513196481, |
| "grad_norm": 1.739223331966883, |
| "learning_rate": 3.949199556193226e-05, |
| "loss": 0.3358, |
| "mean_token_accuracy": 0.8978307694196701, |
| "step": 176 |
| }, |
| { |
| "epoch": 1.035190615835777, |
| "grad_norm": 1.6733699048918613, |
| "learning_rate": 3.948387420130796e-05, |
| "loss": 0.2404, |
| "mean_token_accuracy": 0.9289108365774155, |
| "step": 177 |
| }, |
| { |
| "epoch": 1.0410557184750733, |
| "grad_norm": 1.6691732023766321, |
| "learning_rate": 3.94756893812816e-05, |
| "loss": 0.317, |
| "mean_token_accuracy": 0.9076567217707634, |
| "step": 178 |
| }, |
| { |
| "epoch": 1.0469208211143695, |
| "grad_norm": 1.8180580779574043, |
| "learning_rate": 3.946744113156075e-05, |
| "loss": 0.2832, |
| "mean_token_accuracy": 0.9121890664100647, |
| "step": 179 |
| }, |
| { |
| "epoch": 1.0527859237536656, |
| "grad_norm": 2.060502078131102, |
| "learning_rate": 3.945912948208324e-05, |
| "loss": 0.391, |
| "mean_token_accuracy": 0.893414668738842, |
| "step": 180 |
| }, |
| { |
| "epoch": 1.0586510263929618, |
| "grad_norm": 1.8802598380610254, |
| "learning_rate": 3.9450754463016994e-05, |
| "loss": 0.3546, |
| "mean_token_accuracy": 0.8969884589314461, |
| "step": 181 |
| }, |
| { |
| "epoch": 1.064516129032258, |
| "grad_norm": 1.9657434071689015, |
| "learning_rate": 3.9442316104759955e-05, |
| "loss": 0.3342, |
| "mean_token_accuracy": 0.906390093266964, |
| "step": 182 |
| }, |
| { |
| "epoch": 1.0703812316715542, |
| "grad_norm": 1.488874908366716, |
| "learning_rate": 3.943381443793994e-05, |
| "loss": 0.3531, |
| "mean_token_accuracy": 0.904318280518055, |
| "step": 183 |
| }, |
| { |
| "epoch": 1.0762463343108504, |
| "grad_norm": 1.8571080458413325, |
| "learning_rate": 3.9425249493414585e-05, |
| "loss": 0.4322, |
| "mean_token_accuracy": 0.8704885244369507, |
| "step": 184 |
| }, |
| { |
| "epoch": 1.0821114369501466, |
| "grad_norm": 1.7341671914679213, |
| "learning_rate": 3.941662130227118e-05, |
| "loss": 0.4563, |
| "mean_token_accuracy": 0.868084505200386, |
| "step": 185 |
| }, |
| { |
| "epoch": 1.0879765395894427, |
| "grad_norm": 1.9301135643943996, |
| "learning_rate": 3.940792989582654e-05, |
| "loss": 0.3557, |
| "mean_token_accuracy": 0.8965374007821083, |
| "step": 186 |
| }, |
| { |
| "epoch": 1.093841642228739, |
| "grad_norm": 1.9752759214040492, |
| "learning_rate": 3.939917530562701e-05, |
| "loss": 0.2913, |
| "mean_token_accuracy": 0.9181935787200928, |
| "step": 187 |
| }, |
| { |
| "epoch": 1.099706744868035, |
| "grad_norm": 1.80853857916468, |
| "learning_rate": 3.939035756344818e-05, |
| "loss": 0.3053, |
| "mean_token_accuracy": 0.9187766760587692, |
| "step": 188 |
| }, |
| { |
| "epoch": 1.1055718475073313, |
| "grad_norm": 1.7923257882566426, |
| "learning_rate": 3.93814767012949e-05, |
| "loss": 0.3427, |
| "mean_token_accuracy": 0.8995430916547775, |
| "step": 189 |
| }, |
| { |
| "epoch": 1.1114369501466275, |
| "grad_norm": 1.5136850309097942, |
| "learning_rate": 3.937253275140113e-05, |
| "loss": 0.253, |
| "mean_token_accuracy": 0.9268705397844315, |
| "step": 190 |
| }, |
| { |
| "epoch": 1.1173020527859236, |
| "grad_norm": 1.6967629405583817, |
| "learning_rate": 3.936352574622978e-05, |
| "loss": 0.2441, |
| "mean_token_accuracy": 0.9279790148139, |
| "step": 191 |
| }, |
| { |
| "epoch": 1.1231671554252198, |
| "grad_norm": 1.3723631113386732, |
| "learning_rate": 3.9354455718472646e-05, |
| "loss": 0.3622, |
| "mean_token_accuracy": 0.9010487943887711, |
| "step": 192 |
| }, |
| { |
| "epoch": 1.129032258064516, |
| "grad_norm": 2.033274073582061, |
| "learning_rate": 3.934532270105026e-05, |
| "loss": 0.4103, |
| "mean_token_accuracy": 0.8935407474637032, |
| "step": 193 |
| }, |
| { |
| "epoch": 1.1348973607038122, |
| "grad_norm": 2.155900058898323, |
| "learning_rate": 3.933612672711179e-05, |
| "loss": 0.3699, |
| "mean_token_accuracy": 0.8951970860362053, |
| "step": 194 |
| }, |
| { |
| "epoch": 1.1407624633431086, |
| "grad_norm": 1.6616746411097532, |
| "learning_rate": 3.9326867830034915e-05, |
| "loss": 0.3785, |
| "mean_token_accuracy": 0.8921806812286377, |
| "step": 195 |
| }, |
| { |
| "epoch": 1.1466275659824048, |
| "grad_norm": 1.702931865108539, |
| "learning_rate": 3.931754604342568e-05, |
| "loss": 0.2909, |
| "mean_token_accuracy": 0.9116301015019417, |
| "step": 196 |
| }, |
| { |
| "epoch": 1.152492668621701, |
| "grad_norm": 1.7232592227816033, |
| "learning_rate": 3.930816140111842e-05, |
| "loss": 0.2667, |
| "mean_token_accuracy": 0.9185249134898186, |
| "step": 197 |
| }, |
| { |
| "epoch": 1.1583577712609971, |
| "grad_norm": 1.840797018145673, |
| "learning_rate": 3.929871393717558e-05, |
| "loss": 0.336, |
| "mean_token_accuracy": 0.9130111038684845, |
| "step": 198 |
| }, |
| { |
| "epoch": 1.1642228739002933, |
| "grad_norm": 1.917246893078202, |
| "learning_rate": 3.9289203685887644e-05, |
| "loss": 0.3626, |
| "mean_token_accuracy": 0.8997843265533447, |
| "step": 199 |
| }, |
| { |
| "epoch": 1.1700879765395895, |
| "grad_norm": 1.734990074144959, |
| "learning_rate": 3.927963068177299e-05, |
| "loss": 0.3718, |
| "mean_token_accuracy": 0.8979229480028152, |
| "step": 200 |
| }, |
| { |
| "epoch": 1.1759530791788857, |
| "grad_norm": 1.8303417823422754, |
| "learning_rate": 3.926999495957775e-05, |
| "loss": 0.4617, |
| "mean_token_accuracy": 0.8707368224859238, |
| "step": 201 |
| }, |
| { |
| "epoch": 1.1818181818181819, |
| "grad_norm": 1.820508589510773, |
| "learning_rate": 3.9260296554275704e-05, |
| "loss": 0.4704, |
| "mean_token_accuracy": 0.8739859238266945, |
| "step": 202 |
| }, |
| { |
| "epoch": 1.187683284457478, |
| "grad_norm": 1.9745656184822862, |
| "learning_rate": 3.925053550106815e-05, |
| "loss": 0.3245, |
| "mean_token_accuracy": 0.9014926105737686, |
| "step": 203 |
| }, |
| { |
| "epoch": 1.1935483870967742, |
| "grad_norm": 1.6927826737387262, |
| "learning_rate": 3.9240711835383766e-05, |
| "loss": 0.2987, |
| "mean_token_accuracy": 0.9058414027094841, |
| "step": 204 |
| }, |
| { |
| "epoch": 1.1994134897360704, |
| "grad_norm": 1.5770657862401005, |
| "learning_rate": 3.9230825592878494e-05, |
| "loss": 0.313, |
| "mean_token_accuracy": 0.9111779928207397, |
| "step": 205 |
| }, |
| { |
| "epoch": 1.2052785923753666, |
| "grad_norm": 1.675854486117753, |
| "learning_rate": 3.92208768094354e-05, |
| "loss": 0.2895, |
| "mean_token_accuracy": 0.9199853986501694, |
| "step": 206 |
| }, |
| { |
| "epoch": 1.2111436950146628, |
| "grad_norm": 1.6015878865124002, |
| "learning_rate": 3.921086552116455e-05, |
| "loss": 0.2811, |
| "mean_token_accuracy": 0.9138716906309128, |
| "step": 207 |
| }, |
| { |
| "epoch": 1.217008797653959, |
| "grad_norm": 1.718206784339055, |
| "learning_rate": 3.920079176440288e-05, |
| "loss": 0.2725, |
| "mean_token_accuracy": 0.9215174838900566, |
| "step": 208 |
| }, |
| { |
| "epoch": 1.2228739002932552, |
| "grad_norm": 1.7698754302048199, |
| "learning_rate": 3.9190655575714045e-05, |
| "loss": 0.4332, |
| "mean_token_accuracy": 0.8880220949649811, |
| "step": 209 |
| }, |
| { |
| "epoch": 1.2287390029325513, |
| "grad_norm": 1.9266880843929173, |
| "learning_rate": 3.918045699188833e-05, |
| "loss": 0.3224, |
| "mean_token_accuracy": 0.9085577055811882, |
| "step": 210 |
| }, |
| { |
| "epoch": 1.2346041055718475, |
| "grad_norm": 1.6249028390550062, |
| "learning_rate": 3.9170196049942474e-05, |
| "loss": 0.2676, |
| "mean_token_accuracy": 0.9211436435580254, |
| "step": 211 |
| }, |
| { |
| "epoch": 1.2404692082111437, |
| "grad_norm": 1.533372145565326, |
| "learning_rate": 3.915987278711954e-05, |
| "loss": 0.2888, |
| "mean_token_accuracy": 0.9172268733382225, |
| "step": 212 |
| }, |
| { |
| "epoch": 1.2463343108504399, |
| "grad_norm": 1.4691488921950937, |
| "learning_rate": 3.914948724088883e-05, |
| "loss": 0.3936, |
| "mean_token_accuracy": 0.8931452110409737, |
| "step": 213 |
| }, |
| { |
| "epoch": 1.252199413489736, |
| "grad_norm": 1.8993586750719262, |
| "learning_rate": 3.913903944894565e-05, |
| "loss": 0.3314, |
| "mean_token_accuracy": 0.9030940681695938, |
| "step": 214 |
| }, |
| { |
| "epoch": 1.2580645161290323, |
| "grad_norm": 1.61898052839045, |
| "learning_rate": 3.912852944921129e-05, |
| "loss": 0.2963, |
| "mean_token_accuracy": 0.9144224375486374, |
| "step": 215 |
| }, |
| { |
| "epoch": 1.2639296187683284, |
| "grad_norm": 1.949574828408539, |
| "learning_rate": 3.911795727983279e-05, |
| "loss": 0.3361, |
| "mean_token_accuracy": 0.9106857255101204, |
| "step": 216 |
| }, |
| { |
| "epoch": 1.2697947214076246, |
| "grad_norm": 1.8205919307098928, |
| "learning_rate": 3.910732297918285e-05, |
| "loss": 0.3859, |
| "mean_token_accuracy": 0.9019497409462929, |
| "step": 217 |
| }, |
| { |
| "epoch": 1.2756598240469208, |
| "grad_norm": 2.1271420763487634, |
| "learning_rate": 3.90966265858597e-05, |
| "loss": 0.3987, |
| "mean_token_accuracy": 0.8942231386899948, |
| "step": 218 |
| }, |
| { |
| "epoch": 1.281524926686217, |
| "grad_norm": 1.8506277761965328, |
| "learning_rate": 3.908586813868693e-05, |
| "loss": 0.3845, |
| "mean_token_accuracy": 0.8941864669322968, |
| "step": 219 |
| }, |
| { |
| "epoch": 1.2873900293255132, |
| "grad_norm": 1.8340544441465951, |
| "learning_rate": 3.9075047676713354e-05, |
| "loss": 0.3838, |
| "mean_token_accuracy": 0.8961951732635498, |
| "step": 220 |
| }, |
| { |
| "epoch": 1.2932551319648093, |
| "grad_norm": 1.7279379977091884, |
| "learning_rate": 3.9064165239212874e-05, |
| "loss": 0.3649, |
| "mean_token_accuracy": 0.8994920030236244, |
| "step": 221 |
| }, |
| { |
| "epoch": 1.2991202346041055, |
| "grad_norm": 1.7761193410253977, |
| "learning_rate": 3.905322086568434e-05, |
| "loss": 0.3971, |
| "mean_token_accuracy": 0.8978786915540695, |
| "step": 222 |
| }, |
| { |
| "epoch": 1.3049853372434017, |
| "grad_norm": 2.1577492725825773, |
| "learning_rate": 3.904221459585142e-05, |
| "loss": 0.3386, |
| "mean_token_accuracy": 0.9025338441133499, |
| "step": 223 |
| }, |
| { |
| "epoch": 1.310850439882698, |
| "grad_norm": 2.0991144733466127, |
| "learning_rate": 3.903114646966242e-05, |
| "loss": 0.394, |
| "mean_token_accuracy": 0.8991600722074509, |
| "step": 224 |
| }, |
| { |
| "epoch": 1.316715542521994, |
| "grad_norm": 1.5430466222950896, |
| "learning_rate": 3.9020016527290166e-05, |
| "loss": 0.3554, |
| "mean_token_accuracy": 0.8956394866108894, |
| "step": 225 |
| }, |
| { |
| "epoch": 1.3225806451612903, |
| "grad_norm": 1.5321878304784884, |
| "learning_rate": 3.900882480913185e-05, |
| "loss": 0.2586, |
| "mean_token_accuracy": 0.9249091520905495, |
| "step": 226 |
| }, |
| { |
| "epoch": 1.3284457478005864, |
| "grad_norm": 1.7052024839790294, |
| "learning_rate": 3.899757135580891e-05, |
| "loss": 0.4302, |
| "mean_token_accuracy": 0.8868528082966805, |
| "step": 227 |
| }, |
| { |
| "epoch": 1.3343108504398826, |
| "grad_norm": 1.8879042217654005, |
| "learning_rate": 3.898625620816681e-05, |
| "loss": 0.3146, |
| "mean_token_accuracy": 0.9062324613332748, |
| "step": 228 |
| }, |
| { |
| "epoch": 1.3401759530791788, |
| "grad_norm": 1.895441937578749, |
| "learning_rate": 3.8974879407275e-05, |
| "loss": 0.4562, |
| "mean_token_accuracy": 0.8757164552807808, |
| "step": 229 |
| }, |
| { |
| "epoch": 1.3460410557184752, |
| "grad_norm": 2.1290916797705077, |
| "learning_rate": 3.896344099442663e-05, |
| "loss": 0.3177, |
| "mean_token_accuracy": 0.904730461537838, |
| "step": 230 |
| }, |
| { |
| "epoch": 1.3519061583577714, |
| "grad_norm": 1.5925408878888774, |
| "learning_rate": 3.895194101113855e-05, |
| "loss": 0.2663, |
| "mean_token_accuracy": 0.9067297652363777, |
| "step": 231 |
| }, |
| { |
| "epoch": 1.3577712609970676, |
| "grad_norm": 1.6755193524016967, |
| "learning_rate": 3.894037949915104e-05, |
| "loss": 0.3058, |
| "mean_token_accuracy": 0.9183206856250763, |
| "step": 232 |
| }, |
| { |
| "epoch": 1.3636363636363638, |
| "grad_norm": 1.4261041380447086, |
| "learning_rate": 3.8928756500427735e-05, |
| "loss": 0.3274, |
| "mean_token_accuracy": 0.9030020982027054, |
| "step": 233 |
| }, |
| { |
| "epoch": 1.36950146627566, |
| "grad_norm": 1.7424232651440439, |
| "learning_rate": 3.89170720571554e-05, |
| "loss": 0.2836, |
| "mean_token_accuracy": 0.9171391725540161, |
| "step": 234 |
| }, |
| { |
| "epoch": 1.3753665689149561, |
| "grad_norm": 1.5473986942703324, |
| "learning_rate": 3.890532621174387e-05, |
| "loss": 0.311, |
| "mean_token_accuracy": 0.9047387689352036, |
| "step": 235 |
| }, |
| { |
| "epoch": 1.3812316715542523, |
| "grad_norm": 1.4904609752021027, |
| "learning_rate": 3.8893519006825806e-05, |
| "loss": 0.2811, |
| "mean_token_accuracy": 0.9175030738115311, |
| "step": 236 |
| }, |
| { |
| "epoch": 1.3870967741935485, |
| "grad_norm": 1.6614527615879586, |
| "learning_rate": 3.88816504852566e-05, |
| "loss": 0.2803, |
| "mean_token_accuracy": 0.9204106256365776, |
| "step": 237 |
| }, |
| { |
| "epoch": 1.3929618768328447, |
| "grad_norm": 1.660701923358152, |
| "learning_rate": 3.886972069011419e-05, |
| "loss": 0.4732, |
| "mean_token_accuracy": 0.878779798746109, |
| "step": 238 |
| }, |
| { |
| "epoch": 1.3988269794721409, |
| "grad_norm": 1.8193960520526216, |
| "learning_rate": 3.885772966469891e-05, |
| "loss": 0.3177, |
| "mean_token_accuracy": 0.9024636000394821, |
| "step": 239 |
| }, |
| { |
| "epoch": 1.404692082111437, |
| "grad_norm": 1.6110661415608767, |
| "learning_rate": 3.884567745253335e-05, |
| "loss": 0.2395, |
| "mean_token_accuracy": 0.9258132427930832, |
| "step": 240 |
| }, |
| { |
| "epoch": 1.4105571847507332, |
| "grad_norm": 1.4342539536793646, |
| "learning_rate": 3.8833564097362157e-05, |
| "loss": 0.3794, |
| "mean_token_accuracy": 0.8934561610221863, |
| "step": 241 |
| }, |
| { |
| "epoch": 1.4164222873900294, |
| "grad_norm": 1.5191845667740436, |
| "learning_rate": 3.8821389643151924e-05, |
| "loss": 0.2483, |
| "mean_token_accuracy": 0.9304336309432983, |
| "step": 242 |
| }, |
| { |
| "epoch": 1.4222873900293256, |
| "grad_norm": 1.5906029983066425, |
| "learning_rate": 3.880915413409102e-05, |
| "loss": 0.289, |
| "mean_token_accuracy": 0.9240436926484108, |
| "step": 243 |
| }, |
| { |
| "epoch": 1.4281524926686218, |
| "grad_norm": 1.5750449999867253, |
| "learning_rate": 3.879685761458938e-05, |
| "loss": 0.4022, |
| "mean_token_accuracy": 0.8791051730513573, |
| "step": 244 |
| }, |
| { |
| "epoch": 1.434017595307918, |
| "grad_norm": 1.6746780717366057, |
| "learning_rate": 3.8784500129278405e-05, |
| "loss": 0.2427, |
| "mean_token_accuracy": 0.9278705045580864, |
| "step": 245 |
| }, |
| { |
| "epoch": 1.4398826979472141, |
| "grad_norm": 1.70626429733012, |
| "learning_rate": 3.877208172301079e-05, |
| "loss": 0.4178, |
| "mean_token_accuracy": 0.8769783824682236, |
| "step": 246 |
| }, |
| { |
| "epoch": 1.4457478005865103, |
| "grad_norm": 1.5645704363193036, |
| "learning_rate": 3.875960244086032e-05, |
| "loss": 0.3022, |
| "mean_token_accuracy": 0.9060285091400146, |
| "step": 247 |
| }, |
| { |
| "epoch": 1.4516129032258065, |
| "grad_norm": 1.5927324346376226, |
| "learning_rate": 3.8747062328121756e-05, |
| "loss": 0.3328, |
| "mean_token_accuracy": 0.9143104031682014, |
| "step": 248 |
| }, |
| { |
| "epoch": 1.4574780058651027, |
| "grad_norm": 1.3295756265415142, |
| "learning_rate": 3.873446143031064e-05, |
| "loss": 0.2642, |
| "mean_token_accuracy": 0.9250845462083817, |
| "step": 249 |
| }, |
| { |
| "epoch": 1.4633431085043989, |
| "grad_norm": 1.6451577919637062, |
| "learning_rate": 3.872179979316314e-05, |
| "loss": 0.2576, |
| "mean_token_accuracy": 0.9231655597686768, |
| "step": 250 |
| }, |
| { |
| "epoch": 1.469208211143695, |
| "grad_norm": 1.374617140630625, |
| "learning_rate": 3.870907746263589e-05, |
| "loss": 0.2344, |
| "mean_token_accuracy": 0.9281893447041512, |
| "step": 251 |
| }, |
| { |
| "epoch": 1.4750733137829912, |
| "grad_norm": 1.5385521582071795, |
| "learning_rate": 3.869629448490582e-05, |
| "loss": 0.3019, |
| "mean_token_accuracy": 0.9170176237821579, |
| "step": 252 |
| }, |
| { |
| "epoch": 1.4809384164222874, |
| "grad_norm": 1.4332428265169206, |
| "learning_rate": 3.868345090636995e-05, |
| "loss": 0.32, |
| "mean_token_accuracy": 0.9136760458350182, |
| "step": 253 |
| }, |
| { |
| "epoch": 1.4868035190615836, |
| "grad_norm": 1.8145070360131068, |
| "learning_rate": 3.867054677364531e-05, |
| "loss": 0.3077, |
| "mean_token_accuracy": 0.9085892364382744, |
| "step": 254 |
| }, |
| { |
| "epoch": 1.4926686217008798, |
| "grad_norm": 1.4540625255590833, |
| "learning_rate": 3.865758213356868e-05, |
| "loss": 0.3122, |
| "mean_token_accuracy": 0.9093359783291817, |
| "step": 255 |
| }, |
| { |
| "epoch": 1.498533724340176, |
| "grad_norm": 1.6678383017411527, |
| "learning_rate": 3.8644557033196456e-05, |
| "loss": 0.3175, |
| "mean_token_accuracy": 0.9082972332835197, |
| "step": 256 |
| }, |
| { |
| "epoch": 1.5043988269794721, |
| "grad_norm": 1.5866601580017656, |
| "learning_rate": 3.8631471519804514e-05, |
| "loss": 0.3432, |
| "mean_token_accuracy": 0.9088873639702797, |
| "step": 257 |
| }, |
| { |
| "epoch": 1.5102639296187683, |
| "grad_norm": 1.6958942000186437, |
| "learning_rate": 3.861832564088797e-05, |
| "loss": 0.3633, |
| "mean_token_accuracy": 0.8945498690009117, |
| "step": 258 |
| }, |
| { |
| "epoch": 1.5161290322580645, |
| "grad_norm": 1.6901057610483312, |
| "learning_rate": 3.860511944416105e-05, |
| "loss": 0.2474, |
| "mean_token_accuracy": 0.9269101545214653, |
| "step": 259 |
| }, |
| { |
| "epoch": 1.5219941348973607, |
| "grad_norm": 1.4062959541358844, |
| "learning_rate": 3.859185297755693e-05, |
| "loss": 0.2571, |
| "mean_token_accuracy": 0.9234707877039909, |
| "step": 260 |
| }, |
| { |
| "epoch": 1.5278592375366569, |
| "grad_norm": 1.293753749817753, |
| "learning_rate": 3.857852628922751e-05, |
| "loss": 0.224, |
| "mean_token_accuracy": 0.9362157136201859, |
| "step": 261 |
| }, |
| { |
| "epoch": 1.533724340175953, |
| "grad_norm": 1.7768770225023713, |
| "learning_rate": 3.856513942754329e-05, |
| "loss": 0.2784, |
| "mean_token_accuracy": 0.9158712923526764, |
| "step": 262 |
| }, |
| { |
| "epoch": 1.5395894428152492, |
| "grad_norm": 1.5368924710192076, |
| "learning_rate": 3.8551692441093183e-05, |
| "loss": 0.2147, |
| "mean_token_accuracy": 0.9343990013003349, |
| "step": 263 |
| }, |
| { |
| "epoch": 1.5454545454545454, |
| "grad_norm": 1.5208867150448775, |
| "learning_rate": 3.85381853786843e-05, |
| "loss": 0.3668, |
| "mean_token_accuracy": 0.8892128467559814, |
| "step": 264 |
| }, |
| { |
| "epoch": 1.5513196480938416, |
| "grad_norm": 1.615170986550086, |
| "learning_rate": 3.852461828934184e-05, |
| "loss": 0.3435, |
| "mean_token_accuracy": 0.907134085893631, |
| "step": 265 |
| }, |
| { |
| "epoch": 1.5571847507331378, |
| "grad_norm": 1.5028390308172306, |
| "learning_rate": 3.851099122230885e-05, |
| "loss": 0.2613, |
| "mean_token_accuracy": 0.9213738068938255, |
| "step": 266 |
| }, |
| { |
| "epoch": 1.563049853372434, |
| "grad_norm": 1.5527776825198953, |
| "learning_rate": 3.849730422704608e-05, |
| "loss": 0.3699, |
| "mean_token_accuracy": 0.9007752239704132, |
| "step": 267 |
| }, |
| { |
| "epoch": 1.5689149560117301, |
| "grad_norm": 1.68946384944025, |
| "learning_rate": 3.84835573532318e-05, |
| "loss": 0.2226, |
| "mean_token_accuracy": 0.9297270327806473, |
| "step": 268 |
| }, |
| { |
| "epoch": 1.5747800586510263, |
| "grad_norm": 1.8568160263580067, |
| "learning_rate": 3.84697506507616e-05, |
| "loss": 0.3495, |
| "mean_token_accuracy": 0.8976611867547035, |
| "step": 269 |
| }, |
| { |
| "epoch": 1.5806451612903225, |
| "grad_norm": 1.9946826389284182, |
| "learning_rate": 3.845588416974824e-05, |
| "loss": 0.3764, |
| "mean_token_accuracy": 0.9064979031682014, |
| "step": 270 |
| }, |
| { |
| "epoch": 1.5865102639296187, |
| "grad_norm": 1.6656738473748323, |
| "learning_rate": 3.844195796052144e-05, |
| "loss": 0.3061, |
| "mean_token_accuracy": 0.9155899211764336, |
| "step": 271 |
| }, |
| { |
| "epoch": 1.5923753665689149, |
| "grad_norm": 1.4699049383173204, |
| "learning_rate": 3.8427972073627724e-05, |
| "loss": 0.4471, |
| "mean_token_accuracy": 0.8815479129552841, |
| "step": 272 |
| }, |
| { |
| "epoch": 1.598240469208211, |
| "grad_norm": 1.7825727959157462, |
| "learning_rate": 3.841392655983021e-05, |
| "loss": 0.2179, |
| "mean_token_accuracy": 0.9388237595558167, |
| "step": 273 |
| }, |
| { |
| "epoch": 1.6041055718475072, |
| "grad_norm": 1.2309725165752188, |
| "learning_rate": 3.8399821470108444e-05, |
| "loss": 0.1733, |
| "mean_token_accuracy": 0.9496222510933876, |
| "step": 274 |
| }, |
| { |
| "epoch": 1.6099706744868034, |
| "grad_norm": 1.7497475431193252, |
| "learning_rate": 3.838565685565819e-05, |
| "loss": 0.41, |
| "mean_token_accuracy": 0.892163947224617, |
| "step": 275 |
| }, |
| { |
| "epoch": 1.6158357771260996, |
| "grad_norm": 1.6032760832968165, |
| "learning_rate": 3.8371432767891295e-05, |
| "loss": 0.3103, |
| "mean_token_accuracy": 0.9191496223211288, |
| "step": 276 |
| }, |
| { |
| "epoch": 1.6217008797653958, |
| "grad_norm": 1.475523533191468, |
| "learning_rate": 3.8357149258435444e-05, |
| "loss": 0.2458, |
| "mean_token_accuracy": 0.9312805011868477, |
| "step": 277 |
| }, |
| { |
| "epoch": 1.627565982404692, |
| "grad_norm": 1.4874016779495678, |
| "learning_rate": 3.8342806379134005e-05, |
| "loss": 0.3746, |
| "mean_token_accuracy": 0.8982912823557854, |
| "step": 278 |
| }, |
| { |
| "epoch": 1.6334310850439882, |
| "grad_norm": 1.5396252294388457, |
| "learning_rate": 3.8328404182045854e-05, |
| "loss": 0.3274, |
| "mean_token_accuracy": 0.9162414520978928, |
| "step": 279 |
| }, |
| { |
| "epoch": 1.6392961876832843, |
| "grad_norm": 1.9220523269412597, |
| "learning_rate": 3.831394271944512e-05, |
| "loss": 0.3118, |
| "mean_token_accuracy": 0.9187277778983116, |
| "step": 280 |
| }, |
| { |
| "epoch": 1.6451612903225805, |
| "grad_norm": 1.5408962396890666, |
| "learning_rate": 3.82994220438211e-05, |
| "loss": 0.2972, |
| "mean_token_accuracy": 0.9149600267410278, |
| "step": 281 |
| }, |
| { |
| "epoch": 1.6510263929618767, |
| "grad_norm": 1.6552152776718903, |
| "learning_rate": 3.828484220787797e-05, |
| "loss": 0.3117, |
| "mean_token_accuracy": 0.9124673455953598, |
| "step": 282 |
| }, |
| { |
| "epoch": 1.6568914956011729, |
| "grad_norm": 1.9823703515758113, |
| "learning_rate": 3.8270203264534644e-05, |
| "loss": 0.3958, |
| "mean_token_accuracy": 0.8929010257124901, |
| "step": 283 |
| }, |
| { |
| "epoch": 1.662756598240469, |
| "grad_norm": 1.477782103664971, |
| "learning_rate": 3.8255505266924585e-05, |
| "loss": 0.3019, |
| "mean_token_accuracy": 0.9156582951545715, |
| "step": 284 |
| }, |
| { |
| "epoch": 1.6686217008797652, |
| "grad_norm": 1.3242833914955063, |
| "learning_rate": 3.824074826839557e-05, |
| "loss": 0.2404, |
| "mean_token_accuracy": 0.93580362200737, |
| "step": 285 |
| }, |
| { |
| "epoch": 1.6744868035190614, |
| "grad_norm": 2.0670290487108134, |
| "learning_rate": 3.822593232250956e-05, |
| "loss": 0.4481, |
| "mean_token_accuracy": 0.8762071877717972, |
| "step": 286 |
| }, |
| { |
| "epoch": 1.6803519061583576, |
| "grad_norm": 1.8797288846958915, |
| "learning_rate": 3.8211057483042446e-05, |
| "loss": 0.4279, |
| "mean_token_accuracy": 0.8910372480750084, |
| "step": 287 |
| }, |
| { |
| "epoch": 1.6862170087976538, |
| "grad_norm": 1.5498598053807129, |
| "learning_rate": 3.8196123803983895e-05, |
| "loss": 0.3023, |
| "mean_token_accuracy": 0.917064480483532, |
| "step": 288 |
| }, |
| { |
| "epoch": 1.6920821114369502, |
| "grad_norm": 1.6259661356334203, |
| "learning_rate": 3.818113133953712e-05, |
| "loss": 0.2943, |
| "mean_token_accuracy": 0.9172269403934479, |
| "step": 289 |
| }, |
| { |
| "epoch": 1.6979472140762464, |
| "grad_norm": 1.3857325306672972, |
| "learning_rate": 3.816608014411872e-05, |
| "loss": 0.2045, |
| "mean_token_accuracy": 0.9397373795509338, |
| "step": 290 |
| }, |
| { |
| "epoch": 1.7038123167155426, |
| "grad_norm": 1.3545393801192716, |
| "learning_rate": 3.815097027235845e-05, |
| "loss": 0.3132, |
| "mean_token_accuracy": 0.9108520373702049, |
| "step": 291 |
| }, |
| { |
| "epoch": 1.7096774193548387, |
| "grad_norm": 1.6993235166452834, |
| "learning_rate": 3.813580177909906e-05, |
| "loss": 0.242, |
| "mean_token_accuracy": 0.9247912764549255, |
| "step": 292 |
| }, |
| { |
| "epoch": 1.715542521994135, |
| "grad_norm": 1.062509305690891, |
| "learning_rate": 3.8120574719396023e-05, |
| "loss": 0.2618, |
| "mean_token_accuracy": 0.9303434118628502, |
| "step": 293 |
| }, |
| { |
| "epoch": 1.721407624633431, |
| "grad_norm": 1.7701722344281323, |
| "learning_rate": 3.810528914851745e-05, |
| "loss": 0.3753, |
| "mean_token_accuracy": 0.900924563407898, |
| "step": 294 |
| }, |
| { |
| "epoch": 1.7272727272727273, |
| "grad_norm": 1.5877419428096278, |
| "learning_rate": 3.808994512194376e-05, |
| "loss": 0.3402, |
| "mean_token_accuracy": 0.8950434923171997, |
| "step": 295 |
| }, |
| { |
| "epoch": 1.7331378299120235, |
| "grad_norm": 1.6610643623647858, |
| "learning_rate": 3.807454269536758e-05, |
| "loss": 0.34, |
| "mean_token_accuracy": 0.9047321453690529, |
| "step": 296 |
| }, |
| { |
| "epoch": 1.7390029325513197, |
| "grad_norm": 1.4505169368504018, |
| "learning_rate": 3.805908192469351e-05, |
| "loss": 0.224, |
| "mean_token_accuracy": 0.9305636957287788, |
| "step": 297 |
| }, |
| { |
| "epoch": 1.7448680351906158, |
| "grad_norm": 1.4096184186739809, |
| "learning_rate": 3.80435628660379e-05, |
| "loss": 0.3201, |
| "mean_token_accuracy": 0.9103965312242508, |
| "step": 298 |
| }, |
| { |
| "epoch": 1.750733137829912, |
| "grad_norm": 1.1931765360526814, |
| "learning_rate": 3.802798557572867e-05, |
| "loss": 0.311, |
| "mean_token_accuracy": 0.9164270684123039, |
| "step": 299 |
| }, |
| { |
| "epoch": 1.7565982404692082, |
| "grad_norm": 1.6327117487156346, |
| "learning_rate": 3.801235011030506e-05, |
| "loss": 0.3023, |
| "mean_token_accuracy": 0.9153658151626587, |
| "step": 300 |
| }, |
| { |
| "epoch": 1.7624633431085044, |
| "grad_norm": 1.3440051164242026, |
| "learning_rate": 3.799665652651754e-05, |
| "loss": 0.1817, |
| "mean_token_accuracy": 0.951392151415348, |
| "step": 301 |
| }, |
| { |
| "epoch": 1.7683284457478006, |
| "grad_norm": 1.2516019247625034, |
| "learning_rate": 3.7980904881327446e-05, |
| "loss": 0.2478, |
| "mean_token_accuracy": 0.9341270625591278, |
| "step": 302 |
| }, |
| { |
| "epoch": 1.7741935483870968, |
| "grad_norm": 1.5459031897179347, |
| "learning_rate": 3.796509523190691e-05, |
| "loss": 0.2693, |
| "mean_token_accuracy": 0.918998509645462, |
| "step": 303 |
| }, |
| { |
| "epoch": 1.780058651026393, |
| "grad_norm": 1.3298611087881873, |
| "learning_rate": 3.794922763563857e-05, |
| "loss": 0.2135, |
| "mean_token_accuracy": 0.9386330172419548, |
| "step": 304 |
| }, |
| { |
| "epoch": 1.7859237536656891, |
| "grad_norm": 1.7138314899426605, |
| "learning_rate": 3.793330215011538e-05, |
| "loss": 0.3072, |
| "mean_token_accuracy": 0.9276101067662239, |
| "step": 305 |
| }, |
| { |
| "epoch": 1.7917888563049853, |
| "grad_norm": 1.4545273510274497, |
| "learning_rate": 3.791731883314043e-05, |
| "loss": 0.2681, |
| "mean_token_accuracy": 0.922805443406105, |
| "step": 306 |
| }, |
| { |
| "epoch": 1.7976539589442815, |
| "grad_norm": 1.557251488291821, |
| "learning_rate": 3.790127774272671e-05, |
| "loss": 0.233, |
| "mean_token_accuracy": 0.9324622675776482, |
| "step": 307 |
| }, |
| { |
| "epoch": 1.8035190615835777, |
| "grad_norm": 1.4492526540365742, |
| "learning_rate": 3.7885178937096884e-05, |
| "loss": 0.3703, |
| "mean_token_accuracy": 0.90493393689394, |
| "step": 308 |
| }, |
| { |
| "epoch": 1.8093841642228738, |
| "grad_norm": 1.5281139962268382, |
| "learning_rate": 3.7869022474683125e-05, |
| "loss": 0.4118, |
| "mean_token_accuracy": 0.9041909128427505, |
| "step": 309 |
| }, |
| { |
| "epoch": 1.8152492668621703, |
| "grad_norm": 1.93639219005886, |
| "learning_rate": 3.7852808414126876e-05, |
| "loss": 0.3565, |
| "mean_token_accuracy": 0.9011876359581947, |
| "step": 310 |
| }, |
| { |
| "epoch": 1.8211143695014664, |
| "grad_norm": 1.4288238334885668, |
| "learning_rate": 3.783653681427861e-05, |
| "loss": 0.2322, |
| "mean_token_accuracy": 0.9318009614944458, |
| "step": 311 |
| }, |
| { |
| "epoch": 1.8269794721407626, |
| "grad_norm": 1.5419872027394301, |
| "learning_rate": 3.7820207734197676e-05, |
| "loss": 0.3153, |
| "mean_token_accuracy": 0.915338508784771, |
| "step": 312 |
| }, |
| { |
| "epoch": 1.8328445747800588, |
| "grad_norm": 1.4488167015309188, |
| "learning_rate": 3.780382123315203e-05, |
| "loss": 0.1989, |
| "mean_token_accuracy": 0.9429738447070122, |
| "step": 313 |
| }, |
| { |
| "epoch": 1.838709677419355, |
| "grad_norm": 1.3031287573895856, |
| "learning_rate": 3.778737737061807e-05, |
| "loss": 0.3113, |
| "mean_token_accuracy": 0.9149032607674599, |
| "step": 314 |
| }, |
| { |
| "epoch": 1.8445747800586512, |
| "grad_norm": 1.4920076612556301, |
| "learning_rate": 3.777087620628035e-05, |
| "loss": 0.2359, |
| "mean_token_accuracy": 0.9325834512710571, |
| "step": 315 |
| }, |
| { |
| "epoch": 1.8504398826979473, |
| "grad_norm": 1.385078335478815, |
| "learning_rate": 3.775431780003145e-05, |
| "loss": 0.2044, |
| "mean_token_accuracy": 0.9399889931082726, |
| "step": 316 |
| }, |
| { |
| "epoch": 1.8563049853372435, |
| "grad_norm": 1.269824397497335, |
| "learning_rate": 3.7737702211971684e-05, |
| "loss": 0.2496, |
| "mean_token_accuracy": 0.9346247911453247, |
| "step": 317 |
| }, |
| { |
| "epoch": 1.8621700879765397, |
| "grad_norm": 1.4411880057880286, |
| "learning_rate": 3.772102950240895e-05, |
| "loss": 0.2563, |
| "mean_token_accuracy": 0.9334022998809814, |
| "step": 318 |
| }, |
| { |
| "epoch": 1.868035190615836, |
| "grad_norm": 1.532918693941707, |
| "learning_rate": 3.770429973185842e-05, |
| "loss": 0.2856, |
| "mean_token_accuracy": 0.9228765368461609, |
| "step": 319 |
| }, |
| { |
| "epoch": 1.873900293255132, |
| "grad_norm": 1.508327365783947, |
| "learning_rate": 3.768751296104243e-05, |
| "loss": 0.1919, |
| "mean_token_accuracy": 0.9403479546308517, |
| "step": 320 |
| }, |
| { |
| "epoch": 1.8797653958944283, |
| "grad_norm": 1.2191592802013955, |
| "learning_rate": 3.767066925089017e-05, |
| "loss": 0.2863, |
| "mean_token_accuracy": 0.9149582833051682, |
| "step": 321 |
| }, |
| { |
| "epoch": 1.8856304985337244, |
| "grad_norm": 1.4714233419752547, |
| "learning_rate": 3.765376866253749e-05, |
| "loss": 0.199, |
| "mean_token_accuracy": 0.9367243573069572, |
| "step": 322 |
| }, |
| { |
| "epoch": 1.8914956011730206, |
| "grad_norm": 1.3785113383476932, |
| "learning_rate": 3.763681125732672e-05, |
| "loss": 0.2652, |
| "mean_token_accuracy": 0.9183213263750076, |
| "step": 323 |
| }, |
| { |
| "epoch": 1.8973607038123168, |
| "grad_norm": 1.5448358403304276, |
| "learning_rate": 3.7619797096806386e-05, |
| "loss": 0.2859, |
| "mean_token_accuracy": 0.9174121469259262, |
| "step": 324 |
| }, |
| { |
| "epoch": 1.903225806451613, |
| "grad_norm": 1.4396711929830184, |
| "learning_rate": 3.7602726242731016e-05, |
| "loss": 0.3124, |
| "mean_token_accuracy": 0.9157911166548729, |
| "step": 325 |
| }, |
| { |
| "epoch": 1.9090909090909092, |
| "grad_norm": 1.437625652493676, |
| "learning_rate": 3.758559875706092e-05, |
| "loss": 0.2302, |
| "mean_token_accuracy": 0.9349333196878433, |
| "step": 326 |
| }, |
| { |
| "epoch": 1.9149560117302054, |
| "grad_norm": 1.2452960419013337, |
| "learning_rate": 3.756841470196195e-05, |
| "loss": 0.3109, |
| "mean_token_accuracy": 0.9157072603702545, |
| "step": 327 |
| }, |
| { |
| "epoch": 1.9208211143695015, |
| "grad_norm": 1.3037538478148276, |
| "learning_rate": 3.7551174139805284e-05, |
| "loss": 0.3212, |
| "mean_token_accuracy": 0.9096843525767326, |
| "step": 328 |
| }, |
| { |
| "epoch": 1.9266862170087977, |
| "grad_norm": 1.625979083841291, |
| "learning_rate": 3.75338771331672e-05, |
| "loss": 0.3253, |
| "mean_token_accuracy": 0.9081972911953926, |
| "step": 329 |
| }, |
| { |
| "epoch": 1.932551319648094, |
| "grad_norm": 1.5169434445509558, |
| "learning_rate": 3.7516523744828856e-05, |
| "loss": 0.3588, |
| "mean_token_accuracy": 0.9001770913600922, |
| "step": 330 |
| }, |
| { |
| "epoch": 1.93841642228739, |
| "grad_norm": 1.530352955863984, |
| "learning_rate": 3.7499114037776036e-05, |
| "loss": 0.2789, |
| "mean_token_accuracy": 0.9110410585999489, |
| "step": 331 |
| }, |
| { |
| "epoch": 1.9442815249266863, |
| "grad_norm": 1.6088019528257314, |
| "learning_rate": 3.748164807519894e-05, |
| "loss": 0.4174, |
| "mean_token_accuracy": 0.8930394127964973, |
| "step": 332 |
| }, |
| { |
| "epoch": 1.9501466275659824, |
| "grad_norm": 1.8565709071738816, |
| "learning_rate": 3.746412592049197e-05, |
| "loss": 0.3197, |
| "mean_token_accuracy": 0.9104765355587006, |
| "step": 333 |
| }, |
| { |
| "epoch": 1.9560117302052786, |
| "grad_norm": 1.33049768118604, |
| "learning_rate": 3.7446547637253464e-05, |
| "loss": 0.1996, |
| "mean_token_accuracy": 0.9462396278977394, |
| "step": 334 |
| }, |
| { |
| "epoch": 1.9618768328445748, |
| "grad_norm": 1.4386241294013715, |
| "learning_rate": 3.742891328928549e-05, |
| "loss": 0.284, |
| "mean_token_accuracy": 0.9256806001067162, |
| "step": 335 |
| }, |
| { |
| "epoch": 1.967741935483871, |
| "grad_norm": 1.1259642533453769, |
| "learning_rate": 3.74112229405936e-05, |
| "loss": 0.2623, |
| "mean_token_accuracy": 0.9194123968482018, |
| "step": 336 |
| }, |
| { |
| "epoch": 1.9736070381231672, |
| "grad_norm": 1.2580114294563631, |
| "learning_rate": 3.739347665538664e-05, |
| "loss": 0.2717, |
| "mean_token_accuracy": 0.9298633262515068, |
| "step": 337 |
| }, |
| { |
| "epoch": 1.9794721407624634, |
| "grad_norm": 1.5397240772403886, |
| "learning_rate": 3.7375674498076445e-05, |
| "loss": 0.3439, |
| "mean_token_accuracy": 0.9038892313838005, |
| "step": 338 |
| }, |
| { |
| "epoch": 1.9853372434017595, |
| "grad_norm": 1.6069200337143423, |
| "learning_rate": 3.7357816533277646e-05, |
| "loss": 0.2785, |
| "mean_token_accuracy": 0.9279282689094543, |
| "step": 339 |
| }, |
| { |
| "epoch": 1.9912023460410557, |
| "grad_norm": 1.3029156392114332, |
| "learning_rate": 3.733990282580745e-05, |
| "loss": 0.2791, |
| "mean_token_accuracy": 0.9194482937455177, |
| "step": 340 |
| }, |
| { |
| "epoch": 1.997067448680352, |
| "grad_norm": 1.559895084315268, |
| "learning_rate": 3.732193344068539e-05, |
| "loss": 0.2702, |
| "mean_token_accuracy": 0.9247054308652878, |
| "step": 341 |
| }, |
| { |
| "epoch": 2.0, |
| "grad_norm": 1.9400108632625268, |
| "learning_rate": 3.7303908443133054e-05, |
| "loss": 0.1662, |
| "mean_token_accuracy": 0.9493132084608078, |
| "step": 342 |
| }, |
| { |
| "epoch": 2.005865102639296, |
| "grad_norm": 1.2407168217525242, |
| "learning_rate": 3.728582789857393e-05, |
| "loss": 0.1643, |
| "mean_token_accuracy": 0.9579492285847664, |
| "step": 343 |
| }, |
| { |
| "epoch": 2.0117302052785924, |
| "grad_norm": 1.3093362788237175, |
| "learning_rate": 3.726769187263308e-05, |
| "loss": 0.1865, |
| "mean_token_accuracy": 0.9434748664498329, |
| "step": 344 |
| }, |
| { |
| "epoch": 2.0175953079178885, |
| "grad_norm": 1.1648794190467764, |
| "learning_rate": 3.724950043113695e-05, |
| "loss": 0.1359, |
| "mean_token_accuracy": 0.9601836279034615, |
| "step": 345 |
| }, |
| { |
| "epoch": 2.0234604105571847, |
| "grad_norm": 1.1333150827756964, |
| "learning_rate": 3.723125364011313e-05, |
| "loss": 0.1379, |
| "mean_token_accuracy": 0.95941511541605, |
| "step": 346 |
| }, |
| { |
| "epoch": 2.029325513196481, |
| "grad_norm": 1.1964853813194998, |
| "learning_rate": 3.7212951565790094e-05, |
| "loss": 0.1448, |
| "mean_token_accuracy": 0.9546831250190735, |
| "step": 347 |
| }, |
| { |
| "epoch": 2.035190615835777, |
| "grad_norm": 1.2734515348322968, |
| "learning_rate": 3.7194594274597e-05, |
| "loss": 0.1495, |
| "mean_token_accuracy": 0.9548632949590683, |
| "step": 348 |
| }, |
| { |
| "epoch": 2.0410557184750733, |
| "grad_norm": 1.2877766399858757, |
| "learning_rate": 3.7176181833163385e-05, |
| "loss": 0.1739, |
| "mean_token_accuracy": 0.9461784809827805, |
| "step": 349 |
| }, |
| { |
| "epoch": 2.0469208211143695, |
| "grad_norm": 1.413810988292415, |
| "learning_rate": 3.7157714308318966e-05, |
| "loss": 0.1596, |
| "mean_token_accuracy": 0.9559041485190392, |
| "step": 350 |
| }, |
| { |
| "epoch": 2.0527859237536656, |
| "grad_norm": 1.5026104069307236, |
| "learning_rate": 3.713919176709343e-05, |
| "loss": 0.1985, |
| "mean_token_accuracy": 0.9448290690779686, |
| "step": 351 |
| }, |
| { |
| "epoch": 2.058651026392962, |
| "grad_norm": 1.2187901250703708, |
| "learning_rate": 3.712061427671609e-05, |
| "loss": 0.1305, |
| "mean_token_accuracy": 0.9609132781624794, |
| "step": 352 |
| }, |
| { |
| "epoch": 2.064516129032258, |
| "grad_norm": 1.2188630469947228, |
| "learning_rate": 3.710198190461575e-05, |
| "loss": 0.1763, |
| "mean_token_accuracy": 0.955159068107605, |
| "step": 353 |
| }, |
| { |
| "epoch": 2.070381231671554, |
| "grad_norm": 1.2160320840757712, |
| "learning_rate": 3.7083294718420394e-05, |
| "loss": 0.1674, |
| "mean_token_accuracy": 0.9511153474450111, |
| "step": 354 |
| }, |
| { |
| "epoch": 2.0762463343108504, |
| "grad_norm": 1.4125365150094613, |
| "learning_rate": 3.706455278595696e-05, |
| "loss": 0.1646, |
| "mean_token_accuracy": 0.9504409730434418, |
| "step": 355 |
| }, |
| { |
| "epoch": 2.0821114369501466, |
| "grad_norm": 1.2271156324554804, |
| "learning_rate": 3.7045756175251086e-05, |
| "loss": 0.1575, |
| "mean_token_accuracy": 0.9546771794557571, |
| "step": 356 |
| }, |
| { |
| "epoch": 2.0879765395894427, |
| "grad_norm": 1.2028870317780662, |
| "learning_rate": 3.7026904954526884e-05, |
| "loss": 0.1424, |
| "mean_token_accuracy": 0.9531672671437263, |
| "step": 357 |
| }, |
| { |
| "epoch": 2.093841642228739, |
| "grad_norm": 1.3273793231793187, |
| "learning_rate": 3.7007999192206676e-05, |
| "loss": 0.1488, |
| "mean_token_accuracy": 0.9596338272094727, |
| "step": 358 |
| }, |
| { |
| "epoch": 2.099706744868035, |
| "grad_norm": 1.1844630480638296, |
| "learning_rate": 3.698903895691073e-05, |
| "loss": 0.17, |
| "mean_token_accuracy": 0.9442361816763878, |
| "step": 359 |
| }, |
| { |
| "epoch": 2.1055718475073313, |
| "grad_norm": 1.2591299685961057, |
| "learning_rate": 3.697002431745706e-05, |
| "loss": 0.1597, |
| "mean_token_accuracy": 0.9520756751298904, |
| "step": 360 |
| }, |
| { |
| "epoch": 2.1114369501466275, |
| "grad_norm": 1.1925719447582808, |
| "learning_rate": 3.695095534286111e-05, |
| "loss": 0.1782, |
| "mean_token_accuracy": 0.9533992558717728, |
| "step": 361 |
| }, |
| { |
| "epoch": 2.1173020527859236, |
| "grad_norm": 1.2744217950123338, |
| "learning_rate": 3.693183210233557e-05, |
| "loss": 0.1712, |
| "mean_token_accuracy": 0.9536459594964981, |
| "step": 362 |
| }, |
| { |
| "epoch": 2.12316715542522, |
| "grad_norm": 1.306131992732695, |
| "learning_rate": 3.691265466529007e-05, |
| "loss": 0.1806, |
| "mean_token_accuracy": 0.9424840211868286, |
| "step": 363 |
| }, |
| { |
| "epoch": 2.129032258064516, |
| "grad_norm": 1.1582207478757602, |
| "learning_rate": 3.689342310133097e-05, |
| "loss": 0.1433, |
| "mean_token_accuracy": 0.9588482677936554, |
| "step": 364 |
| }, |
| { |
| "epoch": 2.134897360703812, |
| "grad_norm": 1.0936038450708818, |
| "learning_rate": 3.687413748026108e-05, |
| "loss": 0.1545, |
| "mean_token_accuracy": 0.9539923518896103, |
| "step": 365 |
| }, |
| { |
| "epoch": 2.1407624633431084, |
| "grad_norm": 1.1150867305503824, |
| "learning_rate": 3.68547978720794e-05, |
| "loss": 0.1487, |
| "mean_token_accuracy": 0.9566026851534843, |
| "step": 366 |
| }, |
| { |
| "epoch": 2.1466275659824046, |
| "grad_norm": 1.2512990373026573, |
| "learning_rate": 3.683540434698093e-05, |
| "loss": 0.1438, |
| "mean_token_accuracy": 0.9571522250771523, |
| "step": 367 |
| }, |
| { |
| "epoch": 2.1524926686217007, |
| "grad_norm": 1.1303528659447613, |
| "learning_rate": 3.681595697535629e-05, |
| "loss": 0.1417, |
| "mean_token_accuracy": 0.9597708955407143, |
| "step": 368 |
| }, |
| { |
| "epoch": 2.158357771260997, |
| "grad_norm": 1.1288199363197544, |
| "learning_rate": 3.6796455827791614e-05, |
| "loss": 0.1367, |
| "mean_token_accuracy": 0.9587919190526009, |
| "step": 369 |
| }, |
| { |
| "epoch": 2.164222873900293, |
| "grad_norm": 1.3375789572979553, |
| "learning_rate": 3.677690097506819e-05, |
| "loss": 0.1657, |
| "mean_token_accuracy": 0.952460877597332, |
| "step": 370 |
| }, |
| { |
| "epoch": 2.1700879765395893, |
| "grad_norm": 1.409154566117624, |
| "learning_rate": 3.6757292488162224e-05, |
| "loss": 0.1692, |
| "mean_token_accuracy": 0.9513570293784142, |
| "step": 371 |
| }, |
| { |
| "epoch": 2.1759530791788855, |
| "grad_norm": 1.2987241443422721, |
| "learning_rate": 3.673763043824461e-05, |
| "loss": 0.1854, |
| "mean_token_accuracy": 0.9414068311452866, |
| "step": 372 |
| }, |
| { |
| "epoch": 2.1818181818181817, |
| "grad_norm": 1.3236180598652694, |
| "learning_rate": 3.671791489668065e-05, |
| "loss": 0.1627, |
| "mean_token_accuracy": 0.9539598226547241, |
| "step": 373 |
| }, |
| { |
| "epoch": 2.187683284457478, |
| "grad_norm": 1.2943073517775734, |
| "learning_rate": 3.6698145935029794e-05, |
| "loss": 0.1418, |
| "mean_token_accuracy": 0.9585407078266144, |
| "step": 374 |
| }, |
| { |
| "epoch": 2.193548387096774, |
| "grad_norm": 1.1789057411236086, |
| "learning_rate": 3.66783236250454e-05, |
| "loss": 0.1518, |
| "mean_token_accuracy": 0.953452080488205, |
| "step": 375 |
| }, |
| { |
| "epoch": 2.19941348973607, |
| "grad_norm": 1.389218125126014, |
| "learning_rate": 3.665844803867443e-05, |
| "loss": 0.1719, |
| "mean_token_accuracy": 0.9476408511400223, |
| "step": 376 |
| }, |
| { |
| "epoch": 2.2052785923753664, |
| "grad_norm": 1.3030670440092282, |
| "learning_rate": 3.663851924805725e-05, |
| "loss": 0.1798, |
| "mean_token_accuracy": 0.9471158385276794, |
| "step": 377 |
| }, |
| { |
| "epoch": 2.2111436950146626, |
| "grad_norm": 1.2566016948623684, |
| "learning_rate": 3.66185373255273e-05, |
| "loss": 0.166, |
| "mean_token_accuracy": 0.9486287534236908, |
| "step": 378 |
| }, |
| { |
| "epoch": 2.2170087976539588, |
| "grad_norm": 1.0379347645872854, |
| "learning_rate": 3.6598502343610906e-05, |
| "loss": 0.1297, |
| "mean_token_accuracy": 0.9611979499459267, |
| "step": 379 |
| }, |
| { |
| "epoch": 2.222873900293255, |
| "grad_norm": 1.3188761964469562, |
| "learning_rate": 3.657841437502697e-05, |
| "loss": 0.2066, |
| "mean_token_accuracy": 0.9354860931634903, |
| "step": 380 |
| }, |
| { |
| "epoch": 2.228739002932551, |
| "grad_norm": 1.43705231218096, |
| "learning_rate": 3.6558273492686686e-05, |
| "loss": 0.1769, |
| "mean_token_accuracy": 0.946281909942627, |
| "step": 381 |
| }, |
| { |
| "epoch": 2.2346041055718473, |
| "grad_norm": 1.1828401594348896, |
| "learning_rate": 3.6538079769693334e-05, |
| "loss": 0.1548, |
| "mean_token_accuracy": 0.9557049721479416, |
| "step": 382 |
| }, |
| { |
| "epoch": 2.2404692082111435, |
| "grad_norm": 1.1169926579410214, |
| "learning_rate": 3.6517833279341954e-05, |
| "loss": 0.1304, |
| "mean_token_accuracy": 0.9621347039937973, |
| "step": 383 |
| }, |
| { |
| "epoch": 2.2463343108504397, |
| "grad_norm": 1.1389674170183997, |
| "learning_rate": 3.649753409511916e-05, |
| "loss": 0.1398, |
| "mean_token_accuracy": 0.9618229940533638, |
| "step": 384 |
| }, |
| { |
| "epoch": 2.252199413489736, |
| "grad_norm": 1.2539839985656354, |
| "learning_rate": 3.6477182290702766e-05, |
| "loss": 0.1722, |
| "mean_token_accuracy": 0.9477048069238663, |
| "step": 385 |
| }, |
| { |
| "epoch": 2.258064516129032, |
| "grad_norm": 1.180531998527333, |
| "learning_rate": 3.645677793996161e-05, |
| "loss": 0.1736, |
| "mean_token_accuracy": 0.9495566114783287, |
| "step": 386 |
| }, |
| { |
| "epoch": 2.263929618768328, |
| "grad_norm": 1.2558424458444957, |
| "learning_rate": 3.643632111695525e-05, |
| "loss": 0.1739, |
| "mean_token_accuracy": 0.9519843608140945, |
| "step": 387 |
| }, |
| { |
| "epoch": 2.2697947214076244, |
| "grad_norm": 1.178692770955397, |
| "learning_rate": 3.6415811895933685e-05, |
| "loss": 0.1586, |
| "mean_token_accuracy": 0.9524863511323929, |
| "step": 388 |
| }, |
| { |
| "epoch": 2.2756598240469206, |
| "grad_norm": 1.0834395414342137, |
| "learning_rate": 3.639525035133712e-05, |
| "loss": 0.1353, |
| "mean_token_accuracy": 0.9634513407945633, |
| "step": 389 |
| }, |
| { |
| "epoch": 2.281524926686217, |
| "grad_norm": 1.2781183197507804, |
| "learning_rate": 3.637463655779563e-05, |
| "loss": 0.1813, |
| "mean_token_accuracy": 0.9502886831760406, |
| "step": 390 |
| }, |
| { |
| "epoch": 2.2873900293255134, |
| "grad_norm": 1.1027963255369508, |
| "learning_rate": 3.6353970590128975e-05, |
| "loss": 0.1387, |
| "mean_token_accuracy": 0.9600658416748047, |
| "step": 391 |
| }, |
| { |
| "epoch": 2.2932551319648096, |
| "grad_norm": 1.0503905361604666, |
| "learning_rate": 3.633325252334628e-05, |
| "loss": 0.1462, |
| "mean_token_accuracy": 0.9516377374529839, |
| "step": 392 |
| }, |
| { |
| "epoch": 2.2991202346041058, |
| "grad_norm": 1.3599511368264618, |
| "learning_rate": 3.6312482432645746e-05, |
| "loss": 0.1947, |
| "mean_token_accuracy": 0.9380109906196594, |
| "step": 393 |
| }, |
| { |
| "epoch": 2.304985337243402, |
| "grad_norm": 1.2159860550424557, |
| "learning_rate": 3.6291660393414414e-05, |
| "loss": 0.1361, |
| "mean_token_accuracy": 0.9593810513615608, |
| "step": 394 |
| }, |
| { |
| "epoch": 2.310850439882698, |
| "grad_norm": 1.199526169764911, |
| "learning_rate": 3.6270786481227885e-05, |
| "loss": 0.1561, |
| "mean_token_accuracy": 0.9567776471376419, |
| "step": 395 |
| }, |
| { |
| "epoch": 2.3167155425219943, |
| "grad_norm": 1.2570141761370908, |
| "learning_rate": 3.624986077185003e-05, |
| "loss": 0.1582, |
| "mean_token_accuracy": 0.9552087634801865, |
| "step": 396 |
| }, |
| { |
| "epoch": 2.3225806451612905, |
| "grad_norm": 1.1242395760244872, |
| "learning_rate": 3.622888334123272e-05, |
| "loss": 0.1484, |
| "mean_token_accuracy": 0.9601116627454758, |
| "step": 397 |
| }, |
| { |
| "epoch": 2.3284457478005867, |
| "grad_norm": 1.0414112129082755, |
| "learning_rate": 3.620785426551555e-05, |
| "loss": 0.1372, |
| "mean_token_accuracy": 0.9608481675386429, |
| "step": 398 |
| }, |
| { |
| "epoch": 2.334310850439883, |
| "grad_norm": 1.1917239913966557, |
| "learning_rate": 3.618677362102558e-05, |
| "loss": 0.1343, |
| "mean_token_accuracy": 0.9602387845516205, |
| "step": 399 |
| }, |
| { |
| "epoch": 2.340175953079179, |
| "grad_norm": 1.4226312444436942, |
| "learning_rate": 3.616564148427703e-05, |
| "loss": 0.1568, |
| "mean_token_accuracy": 0.9512313082814217, |
| "step": 400 |
| }, |
| { |
| "epoch": 2.346041055718475, |
| "grad_norm": 1.2485026183547716, |
| "learning_rate": 3.614445793197103e-05, |
| "loss": 0.1535, |
| "mean_token_accuracy": 0.9574841260910034, |
| "step": 401 |
| }, |
| { |
| "epoch": 2.3519061583577714, |
| "grad_norm": 1.166319971957769, |
| "learning_rate": 3.61232230409953e-05, |
| "loss": 0.1503, |
| "mean_token_accuracy": 0.9557152092456818, |
| "step": 402 |
| }, |
| { |
| "epoch": 2.3577712609970676, |
| "grad_norm": 1.2712176223501275, |
| "learning_rate": 3.6101936888423936e-05, |
| "loss": 0.1664, |
| "mean_token_accuracy": 0.9563668668270111, |
| "step": 403 |
| }, |
| { |
| "epoch": 2.3636363636363638, |
| "grad_norm": 1.2891487616197648, |
| "learning_rate": 3.6080599551517076e-05, |
| "loss": 0.1767, |
| "mean_token_accuracy": 0.9508347064256668, |
| "step": 404 |
| }, |
| { |
| "epoch": 2.36950146627566, |
| "grad_norm": 1.3686551265636433, |
| "learning_rate": 3.605921110772063e-05, |
| "loss": 0.1799, |
| "mean_token_accuracy": 0.9483750611543655, |
| "step": 405 |
| }, |
| { |
| "epoch": 2.375366568914956, |
| "grad_norm": 1.2386733512977435, |
| "learning_rate": 3.603777163466601e-05, |
| "loss": 0.1483, |
| "mean_token_accuracy": 0.9566259980201721, |
| "step": 406 |
| }, |
| { |
| "epoch": 2.3812316715542523, |
| "grad_norm": 1.224756896302551, |
| "learning_rate": 3.6016281210169844e-05, |
| "loss": 0.1653, |
| "mean_token_accuracy": 0.9489512741565704, |
| "step": 407 |
| }, |
| { |
| "epoch": 2.3870967741935485, |
| "grad_norm": 1.230326595109465, |
| "learning_rate": 3.599473991223369e-05, |
| "loss": 0.1672, |
| "mean_token_accuracy": 0.9534252062439919, |
| "step": 408 |
| }, |
| { |
| "epoch": 2.3929618768328447, |
| "grad_norm": 1.2204737382987605, |
| "learning_rate": 3.5973147819043765e-05, |
| "loss": 0.1725, |
| "mean_token_accuracy": 0.9434708282351494, |
| "step": 409 |
| }, |
| { |
| "epoch": 2.398826979472141, |
| "grad_norm": 1.3957014903180014, |
| "learning_rate": 3.595150500897065e-05, |
| "loss": 0.1932, |
| "mean_token_accuracy": 0.939488522708416, |
| "step": 410 |
| }, |
| { |
| "epoch": 2.404692082111437, |
| "grad_norm": 1.1384701654699962, |
| "learning_rate": 3.5929811560569e-05, |
| "loss": 0.1584, |
| "mean_token_accuracy": 0.9575295448303223, |
| "step": 411 |
| }, |
| { |
| "epoch": 2.410557184750733, |
| "grad_norm": 0.9579104530696713, |
| "learning_rate": 3.590806755257726e-05, |
| "loss": 0.1333, |
| "mean_token_accuracy": 0.9575985744595528, |
| "step": 412 |
| }, |
| { |
| "epoch": 2.4164222873900294, |
| "grad_norm": 1.1734333582678909, |
| "learning_rate": 3.5886273063917426e-05, |
| "loss": 0.1621, |
| "mean_token_accuracy": 0.9481822401285172, |
| "step": 413 |
| }, |
| { |
| "epoch": 2.4222873900293256, |
| "grad_norm": 1.1544037143542194, |
| "learning_rate": 3.586442817369467e-05, |
| "loss": 0.1548, |
| "mean_token_accuracy": 0.9499908536672592, |
| "step": 414 |
| }, |
| { |
| "epoch": 2.4281524926686218, |
| "grad_norm": 1.128417594334492, |
| "learning_rate": 3.5842532961197114e-05, |
| "loss": 0.1466, |
| "mean_token_accuracy": 0.955817773938179, |
| "step": 415 |
| }, |
| { |
| "epoch": 2.434017595307918, |
| "grad_norm": 1.2635081826287935, |
| "learning_rate": 3.582058750589555e-05, |
| "loss": 0.1821, |
| "mean_token_accuracy": 0.9489640519022942, |
| "step": 416 |
| }, |
| { |
| "epoch": 2.439882697947214, |
| "grad_norm": 1.3902977463993567, |
| "learning_rate": 3.579859188744311e-05, |
| "loss": 0.2305, |
| "mean_token_accuracy": 0.9296863749623299, |
| "step": 417 |
| }, |
| { |
| "epoch": 2.4457478005865103, |
| "grad_norm": 1.3034135953208241, |
| "learning_rate": 3.5776546185675014e-05, |
| "loss": 0.1831, |
| "mean_token_accuracy": 0.9438828676939011, |
| "step": 418 |
| }, |
| { |
| "epoch": 2.4516129032258065, |
| "grad_norm": 1.2546254507595782, |
| "learning_rate": 3.5754450480608244e-05, |
| "loss": 0.1882, |
| "mean_token_accuracy": 0.9402789622545242, |
| "step": 419 |
| }, |
| { |
| "epoch": 2.4574780058651027, |
| "grad_norm": 1.1880320034830234, |
| "learning_rate": 3.5732304852441294e-05, |
| "loss": 0.187, |
| "mean_token_accuracy": 0.9429607689380646, |
| "step": 420 |
| }, |
| { |
| "epoch": 2.463343108504399, |
| "grad_norm": 1.3907667610919618, |
| "learning_rate": 3.571010938155386e-05, |
| "loss": 0.2268, |
| "mean_token_accuracy": 0.9306723326444626, |
| "step": 421 |
| }, |
| { |
| "epoch": 2.469208211143695, |
| "grad_norm": 1.3355415014834187, |
| "learning_rate": 3.5687864148506515e-05, |
| "loss": 0.1807, |
| "mean_token_accuracy": 0.9420250505208969, |
| "step": 422 |
| }, |
| { |
| "epoch": 2.4750733137829912, |
| "grad_norm": 1.1013453172706291, |
| "learning_rate": 3.566556923404048e-05, |
| "loss": 0.1451, |
| "mean_token_accuracy": 0.9562686085700989, |
| "step": 423 |
| }, |
| { |
| "epoch": 2.4809384164222874, |
| "grad_norm": 1.070627821312052, |
| "learning_rate": 3.5643224719077294e-05, |
| "loss": 0.1409, |
| "mean_token_accuracy": 0.9597943052649498, |
| "step": 424 |
| }, |
| { |
| "epoch": 2.4868035190615836, |
| "grad_norm": 1.051505185810034, |
| "learning_rate": 3.5620830684718515e-05, |
| "loss": 0.1443, |
| "mean_token_accuracy": 0.956302635371685, |
| "step": 425 |
| }, |
| { |
| "epoch": 2.4926686217008798, |
| "grad_norm": 1.154259044158569, |
| "learning_rate": 3.5598387212245456e-05, |
| "loss": 0.1595, |
| "mean_token_accuracy": 0.9494320005178452, |
| "step": 426 |
| }, |
| { |
| "epoch": 2.498533724340176, |
| "grad_norm": 1.2471720681070686, |
| "learning_rate": 3.5575894383118846e-05, |
| "loss": 0.1666, |
| "mean_token_accuracy": 0.9528159871697426, |
| "step": 427 |
| }, |
| { |
| "epoch": 2.504398826979472, |
| "grad_norm": 1.1939193991897141, |
| "learning_rate": 3.5553352278978574e-05, |
| "loss": 0.152, |
| "mean_token_accuracy": 0.9526803568005562, |
| "step": 428 |
| }, |
| { |
| "epoch": 2.5102639296187683, |
| "grad_norm": 1.2727638707528373, |
| "learning_rate": 3.553076098164337e-05, |
| "loss": 0.1536, |
| "mean_token_accuracy": 0.9583421349525452, |
| "step": 429 |
| }, |
| { |
| "epoch": 2.5161290322580645, |
| "grad_norm": 1.2422536760050964, |
| "learning_rate": 3.5508120573110516e-05, |
| "loss": 0.1731, |
| "mean_token_accuracy": 0.9483218640089035, |
| "step": 430 |
| }, |
| { |
| "epoch": 2.5219941348973607, |
| "grad_norm": 1.2403912167108455, |
| "learning_rate": 3.548543113555557e-05, |
| "loss": 0.1267, |
| "mean_token_accuracy": 0.9638039022684097, |
| "step": 431 |
| }, |
| { |
| "epoch": 2.527859237536657, |
| "grad_norm": 1.3152817380517734, |
| "learning_rate": 3.5462692751332014e-05, |
| "loss": 0.1791, |
| "mean_token_accuracy": 0.948051743209362, |
| "step": 432 |
| }, |
| { |
| "epoch": 2.533724340175953, |
| "grad_norm": 1.085975867278954, |
| "learning_rate": 3.5439905502970996e-05, |
| "loss": 0.1229, |
| "mean_token_accuracy": 0.9650994911789894, |
| "step": 433 |
| }, |
| { |
| "epoch": 2.5395894428152492, |
| "grad_norm": 1.149985648209585, |
| "learning_rate": 3.541706947318103e-05, |
| "loss": 0.1543, |
| "mean_token_accuracy": 0.9525493830442429, |
| "step": 434 |
| }, |
| { |
| "epoch": 2.5454545454545454, |
| "grad_norm": 1.470381191104711, |
| "learning_rate": 3.539418474484768e-05, |
| "loss": 0.2024, |
| "mean_token_accuracy": 0.9437252059578896, |
| "step": 435 |
| }, |
| { |
| "epoch": 2.5513196480938416, |
| "grad_norm": 1.12497151321389, |
| "learning_rate": 3.537125140103327e-05, |
| "loss": 0.1568, |
| "mean_token_accuracy": 0.9551517963409424, |
| "step": 436 |
| }, |
| { |
| "epoch": 2.557184750733138, |
| "grad_norm": 1.1347803729002162, |
| "learning_rate": 3.534826952497657e-05, |
| "loss": 0.1281, |
| "mean_token_accuracy": 0.9624199569225311, |
| "step": 437 |
| }, |
| { |
| "epoch": 2.563049853372434, |
| "grad_norm": 1.1951246836206368, |
| "learning_rate": 3.5325239200092505e-05, |
| "loss": 0.1647, |
| "mean_token_accuracy": 0.9496383666992188, |
| "step": 438 |
| }, |
| { |
| "epoch": 2.56891495601173, |
| "grad_norm": 1.1809247397642806, |
| "learning_rate": 3.5302160509971866e-05, |
| "loss": 0.172, |
| "mean_token_accuracy": 0.9477546736598015, |
| "step": 439 |
| }, |
| { |
| "epoch": 2.5747800586510263, |
| "grad_norm": 1.1596906219556977, |
| "learning_rate": 3.5279033538380974e-05, |
| "loss": 0.1639, |
| "mean_token_accuracy": 0.9496021196246147, |
| "step": 440 |
| }, |
| { |
| "epoch": 2.5806451612903225, |
| "grad_norm": 0.9920483376297324, |
| "learning_rate": 3.5255858369261385e-05, |
| "loss": 0.1189, |
| "mean_token_accuracy": 0.9629802703857422, |
| "step": 441 |
| }, |
| { |
| "epoch": 2.5865102639296187, |
| "grad_norm": 1.367720649975484, |
| "learning_rate": 3.523263508672961e-05, |
| "loss": 0.1885, |
| "mean_token_accuracy": 0.9483638033270836, |
| "step": 442 |
| }, |
| { |
| "epoch": 2.592375366568915, |
| "grad_norm": 1.1849168973571333, |
| "learning_rate": 3.520936377507679e-05, |
| "loss": 0.1537, |
| "mean_token_accuracy": 0.9526606574654579, |
| "step": 443 |
| }, |
| { |
| "epoch": 2.598240469208211, |
| "grad_norm": 1.3702847278864858, |
| "learning_rate": 3.5186044518768376e-05, |
| "loss": 0.2026, |
| "mean_token_accuracy": 0.9368810132145882, |
| "step": 444 |
| }, |
| { |
| "epoch": 2.6041055718475072, |
| "grad_norm": 1.4593973761132104, |
| "learning_rate": 3.5162677402443864e-05, |
| "loss": 0.1669, |
| "mean_token_accuracy": 0.9492918252944946, |
| "step": 445 |
| }, |
| { |
| "epoch": 2.6099706744868034, |
| "grad_norm": 1.1882833288447554, |
| "learning_rate": 3.513926251091644e-05, |
| "loss": 0.1538, |
| "mean_token_accuracy": 0.9531656056642532, |
| "step": 446 |
| }, |
| { |
| "epoch": 2.6158357771260996, |
| "grad_norm": 1.2494601852431129, |
| "learning_rate": 3.51157999291727e-05, |
| "loss": 0.1726, |
| "mean_token_accuracy": 0.9487390294671059, |
| "step": 447 |
| }, |
| { |
| "epoch": 2.621700879765396, |
| "grad_norm": 1.4261144649471686, |
| "learning_rate": 3.509228974237235e-05, |
| "loss": 0.2139, |
| "mean_token_accuracy": 0.9373802468180656, |
| "step": 448 |
| }, |
| { |
| "epoch": 2.627565982404692, |
| "grad_norm": 1.20200077833905, |
| "learning_rate": 3.506873203584787e-05, |
| "loss": 0.16, |
| "mean_token_accuracy": 0.9530724361538887, |
| "step": 449 |
| }, |
| { |
| "epoch": 2.633431085043988, |
| "grad_norm": 0.9474239286630374, |
| "learning_rate": 3.504512689510422e-05, |
| "loss": 0.1243, |
| "mean_token_accuracy": 0.9638230577111244, |
| "step": 450 |
| }, |
| { |
| "epoch": 2.6392961876832843, |
| "grad_norm": 1.1867300274109467, |
| "learning_rate": 3.5021474405818525e-05, |
| "loss": 0.1468, |
| "mean_token_accuracy": 0.9534016251564026, |
| "step": 451 |
| }, |
| { |
| "epoch": 2.6451612903225805, |
| "grad_norm": 1.249095861980521, |
| "learning_rate": 3.499777465383977e-05, |
| "loss": 0.1821, |
| "mean_token_accuracy": 0.9496468231081963, |
| "step": 452 |
| }, |
| { |
| "epoch": 2.6510263929618767, |
| "grad_norm": 1.2012362608906981, |
| "learning_rate": 3.497402772518848e-05, |
| "loss": 0.183, |
| "mean_token_accuracy": 0.9402816966176033, |
| "step": 453 |
| }, |
| { |
| "epoch": 2.656891495601173, |
| "grad_norm": 1.1225945446252723, |
| "learning_rate": 3.4950233706056415e-05, |
| "loss": 0.1433, |
| "mean_token_accuracy": 0.9563997834920883, |
| "step": 454 |
| }, |
| { |
| "epoch": 2.662756598240469, |
| "grad_norm": 1.3042750039691624, |
| "learning_rate": 3.4926392682806265e-05, |
| "loss": 0.1905, |
| "mean_token_accuracy": 0.9459337666630745, |
| "step": 455 |
| }, |
| { |
| "epoch": 2.6686217008797652, |
| "grad_norm": 1.232909377449062, |
| "learning_rate": 3.490250474197131e-05, |
| "loss": 0.1782, |
| "mean_token_accuracy": 0.9491490572690964, |
| "step": 456 |
| }, |
| { |
| "epoch": 2.6744868035190614, |
| "grad_norm": 1.1867787570349237, |
| "learning_rate": 3.4878569970255116e-05, |
| "loss": 0.1541, |
| "mean_token_accuracy": 0.9519700258970261, |
| "step": 457 |
| }, |
| { |
| "epoch": 2.6803519061583576, |
| "grad_norm": 1.247851879583845, |
| "learning_rate": 3.485458845453125e-05, |
| "loss": 0.1759, |
| "mean_token_accuracy": 0.9479285329580307, |
| "step": 458 |
| }, |
| { |
| "epoch": 2.686217008797654, |
| "grad_norm": 1.0354665171969741, |
| "learning_rate": 3.483056028184293e-05, |
| "loss": 0.1247, |
| "mean_token_accuracy": 0.9654245972633362, |
| "step": 459 |
| }, |
| { |
| "epoch": 2.6920821114369504, |
| "grad_norm": 1.1760698781621217, |
| "learning_rate": 3.4806485539402716e-05, |
| "loss": 0.1507, |
| "mean_token_accuracy": 0.9528908804059029, |
| "step": 460 |
| }, |
| { |
| "epoch": 2.6979472140762466, |
| "grad_norm": 1.0691808653279993, |
| "learning_rate": 3.4782364314592186e-05, |
| "loss": 0.1437, |
| "mean_token_accuracy": 0.9560307934880257, |
| "step": 461 |
| }, |
| { |
| "epoch": 2.703812316715543, |
| "grad_norm": 1.17729816637266, |
| "learning_rate": 3.475819669496167e-05, |
| "loss": 0.1363, |
| "mean_token_accuracy": 0.9557816758751869, |
| "step": 462 |
| }, |
| { |
| "epoch": 2.709677419354839, |
| "grad_norm": 1.0634781787620604, |
| "learning_rate": 3.473398276822985e-05, |
| "loss": 0.1467, |
| "mean_token_accuracy": 0.9533882141113281, |
| "step": 463 |
| }, |
| { |
| "epoch": 2.715542521994135, |
| "grad_norm": 1.2950839756637311, |
| "learning_rate": 3.47097226222835e-05, |
| "loss": 0.1824, |
| "mean_token_accuracy": 0.9495379999279976, |
| "step": 464 |
| }, |
| { |
| "epoch": 2.7214076246334313, |
| "grad_norm": 1.2423471841197542, |
| "learning_rate": 3.468541634517716e-05, |
| "loss": 0.155, |
| "mean_token_accuracy": 0.9580182358622551, |
| "step": 465 |
| }, |
| { |
| "epoch": 2.7272727272727275, |
| "grad_norm": 1.0744705318053995, |
| "learning_rate": 3.4661064025132796e-05, |
| "loss": 0.1206, |
| "mean_token_accuracy": 0.958877831697464, |
| "step": 466 |
| }, |
| { |
| "epoch": 2.7331378299120237, |
| "grad_norm": 1.5727806483902393, |
| "learning_rate": 3.463666575053949e-05, |
| "loss": 0.2087, |
| "mean_token_accuracy": 0.9415034204721451, |
| "step": 467 |
| }, |
| { |
| "epoch": 2.73900293255132, |
| "grad_norm": 0.950722552564766, |
| "learning_rate": 3.4612221609953126e-05, |
| "loss": 0.1352, |
| "mean_token_accuracy": 0.961692214012146, |
| "step": 468 |
| }, |
| { |
| "epoch": 2.744868035190616, |
| "grad_norm": 1.1223316535506394, |
| "learning_rate": 3.4587731692096065e-05, |
| "loss": 0.1565, |
| "mean_token_accuracy": 0.9538895487785339, |
| "step": 469 |
| }, |
| { |
| "epoch": 2.7507331378299122, |
| "grad_norm": 1.3022612967089928, |
| "learning_rate": 3.4563196085856815e-05, |
| "loss": 0.1817, |
| "mean_token_accuracy": 0.9450778216123581, |
| "step": 470 |
| }, |
| { |
| "epoch": 2.7565982404692084, |
| "grad_norm": 1.07230491048182, |
| "learning_rate": 3.4538614880289724e-05, |
| "loss": 0.1615, |
| "mean_token_accuracy": 0.9541483297944069, |
| "step": 471 |
| }, |
| { |
| "epoch": 2.7624633431085046, |
| "grad_norm": 0.9956217514306797, |
| "learning_rate": 3.4513988164614635e-05, |
| "loss": 0.1227, |
| "mean_token_accuracy": 0.9635503962635994, |
| "step": 472 |
| }, |
| { |
| "epoch": 2.768328445747801, |
| "grad_norm": 1.0071708374540242, |
| "learning_rate": 3.4489316028216584e-05, |
| "loss": 0.1317, |
| "mean_token_accuracy": 0.9630229771137238, |
| "step": 473 |
| }, |
| { |
| "epoch": 2.774193548387097, |
| "grad_norm": 0.9934398270519144, |
| "learning_rate": 3.446459856064545e-05, |
| "loss": 0.1364, |
| "mean_token_accuracy": 0.9594153240323067, |
| "step": 474 |
| }, |
| { |
| "epoch": 2.780058651026393, |
| "grad_norm": 1.4123399599829125, |
| "learning_rate": 3.443983585161568e-05, |
| "loss": 0.1758, |
| "mean_token_accuracy": 0.9463600069284439, |
| "step": 475 |
| }, |
| { |
| "epoch": 2.7859237536656893, |
| "grad_norm": 1.2042742464006473, |
| "learning_rate": 3.441502799100588e-05, |
| "loss": 0.1598, |
| "mean_token_accuracy": 0.959763303399086, |
| "step": 476 |
| }, |
| { |
| "epoch": 2.7917888563049855, |
| "grad_norm": 1.1516859677521762, |
| "learning_rate": 3.439017506885858e-05, |
| "loss": 0.1616, |
| "mean_token_accuracy": 0.9520630687475204, |
| "step": 477 |
| }, |
| { |
| "epoch": 2.7976539589442817, |
| "grad_norm": 1.2254942875301806, |
| "learning_rate": 3.436527717537985e-05, |
| "loss": 0.1617, |
| "mean_token_accuracy": 0.9591241255402565, |
| "step": 478 |
| }, |
| { |
| "epoch": 2.803519061583578, |
| "grad_norm": 1.1073823095384847, |
| "learning_rate": 3.434033440093899e-05, |
| "loss": 0.1639, |
| "mean_token_accuracy": 0.9487877935171127, |
| "step": 479 |
| }, |
| { |
| "epoch": 2.809384164222874, |
| "grad_norm": 1.25742736357608, |
| "learning_rate": 3.431534683606818e-05, |
| "loss": 0.1864, |
| "mean_token_accuracy": 0.948534868657589, |
| "step": 480 |
| }, |
| { |
| "epoch": 2.8152492668621703, |
| "grad_norm": 1.0344032720648466, |
| "learning_rate": 3.4290314571462214e-05, |
| "loss": 0.1417, |
| "mean_token_accuracy": 0.9601919278502464, |
| "step": 481 |
| }, |
| { |
| "epoch": 2.8211143695014664, |
| "grad_norm": 0.9930463384326542, |
| "learning_rate": 3.426523769797808e-05, |
| "loss": 0.1359, |
| "mean_token_accuracy": 0.9583753347396851, |
| "step": 482 |
| }, |
| { |
| "epoch": 2.8269794721407626, |
| "grad_norm": 1.2042277956276288, |
| "learning_rate": 3.424011630663472e-05, |
| "loss": 0.1653, |
| "mean_token_accuracy": 0.9464479833841324, |
| "step": 483 |
| }, |
| { |
| "epoch": 2.832844574780059, |
| "grad_norm": 1.194262739676029, |
| "learning_rate": 3.421495048861262e-05, |
| "loss": 0.171, |
| "mean_token_accuracy": 0.9502200856804848, |
| "step": 484 |
| }, |
| { |
| "epoch": 2.838709677419355, |
| "grad_norm": 1.1544939224845419, |
| "learning_rate": 3.418974033525355e-05, |
| "loss": 0.1409, |
| "mean_token_accuracy": 0.9586869552731514, |
| "step": 485 |
| }, |
| { |
| "epoch": 2.844574780058651, |
| "grad_norm": 1.187689984945221, |
| "learning_rate": 3.416448593806019e-05, |
| "loss": 0.1751, |
| "mean_token_accuracy": 0.9496021121740341, |
| "step": 486 |
| }, |
| { |
| "epoch": 2.8504398826979473, |
| "grad_norm": 1.1330699826268225, |
| "learning_rate": 3.4139187388695774e-05, |
| "loss": 0.1551, |
| "mean_token_accuracy": 0.950385794043541, |
| "step": 487 |
| }, |
| { |
| "epoch": 2.8563049853372435, |
| "grad_norm": 1.307262909941581, |
| "learning_rate": 3.411384477898385e-05, |
| "loss": 0.1655, |
| "mean_token_accuracy": 0.9537685662508011, |
| "step": 488 |
| }, |
| { |
| "epoch": 2.8621700879765397, |
| "grad_norm": 1.0577486778486105, |
| "learning_rate": 3.408845820090784e-05, |
| "loss": 0.1544, |
| "mean_token_accuracy": 0.9556203186511993, |
| "step": 489 |
| }, |
| { |
| "epoch": 2.868035190615836, |
| "grad_norm": 1.246360384808443, |
| "learning_rate": 3.406302774661077e-05, |
| "loss": 0.1983, |
| "mean_token_accuracy": 0.939469151198864, |
| "step": 490 |
| }, |
| { |
| "epoch": 2.873900293255132, |
| "grad_norm": 1.2815397210697366, |
| "learning_rate": 3.403755350839492e-05, |
| "loss": 0.1934, |
| "mean_token_accuracy": 0.945215106010437, |
| "step": 491 |
| }, |
| { |
| "epoch": 2.8797653958944283, |
| "grad_norm": 0.9463210261831989, |
| "learning_rate": 3.401203557872149e-05, |
| "loss": 0.1094, |
| "mean_token_accuracy": 0.9667675942182541, |
| "step": 492 |
| }, |
| { |
| "epoch": 2.8856304985337244, |
| "grad_norm": 1.1206820564307887, |
| "learning_rate": 3.398647405021026e-05, |
| "loss": 0.1511, |
| "mean_token_accuracy": 0.9558945000171661, |
| "step": 493 |
| }, |
| { |
| "epoch": 2.8914956011730206, |
| "grad_norm": 1.3956556588522069, |
| "learning_rate": 3.396086901563925e-05, |
| "loss": 0.2011, |
| "mean_token_accuracy": 0.9396672174334526, |
| "step": 494 |
| }, |
| { |
| "epoch": 2.897360703812317, |
| "grad_norm": 0.9893183505790021, |
| "learning_rate": 3.3935220567944395e-05, |
| "loss": 0.1346, |
| "mean_token_accuracy": 0.9598894119262695, |
| "step": 495 |
| }, |
| { |
| "epoch": 2.903225806451613, |
| "grad_norm": 1.2616449700071117, |
| "learning_rate": 3.39095288002192e-05, |
| "loss": 0.195, |
| "mean_token_accuracy": 0.9443835839629173, |
| "step": 496 |
| }, |
| { |
| "epoch": 2.909090909090909, |
| "grad_norm": 1.0424261885719452, |
| "learning_rate": 3.3883793805714406e-05, |
| "loss": 0.1502, |
| "mean_token_accuracy": 0.9556261077523232, |
| "step": 497 |
| }, |
| { |
| "epoch": 2.9149560117302054, |
| "grad_norm": 1.3618750738536685, |
| "learning_rate": 3.3858015677837656e-05, |
| "loss": 0.18, |
| "mean_token_accuracy": 0.9481714516878128, |
| "step": 498 |
| }, |
| { |
| "epoch": 2.9208211143695015, |
| "grad_norm": 1.2006128437590662, |
| "learning_rate": 3.3832194510153126e-05, |
| "loss": 0.1718, |
| "mean_token_accuracy": 0.9532595574855804, |
| "step": 499 |
| }, |
| { |
| "epoch": 2.9266862170087977, |
| "grad_norm": 1.2296015739991573, |
| "learning_rate": 3.380633039638125e-05, |
| "loss": 0.164, |
| "mean_token_accuracy": 0.9541523456573486, |
| "step": 500 |
| }, |
| { |
| "epoch": 2.932551319648094, |
| "grad_norm": 1.2323869227106137, |
| "learning_rate": 3.37804234303983e-05, |
| "loss": 0.1802, |
| "mean_token_accuracy": 0.946508027613163, |
| "step": 501 |
| }, |
| { |
| "epoch": 2.93841642228739, |
| "grad_norm": 1.2142191669529525, |
| "learning_rate": 3.37544737062361e-05, |
| "loss": 0.1738, |
| "mean_token_accuracy": 0.9512768238782883, |
| "step": 502 |
| }, |
| { |
| "epoch": 2.9442815249266863, |
| "grad_norm": 1.192114759053001, |
| "learning_rate": 3.372848131808167e-05, |
| "loss": 0.1685, |
| "mean_token_accuracy": 0.9535468518733978, |
| "step": 503 |
| }, |
| { |
| "epoch": 2.9501466275659824, |
| "grad_norm": 1.2900442027550065, |
| "learning_rate": 3.370244636027688e-05, |
| "loss": 0.1652, |
| "mean_token_accuracy": 0.9492153376340866, |
| "step": 504 |
| }, |
| { |
| "epoch": 2.9560117302052786, |
| "grad_norm": 1.4383269282382705, |
| "learning_rate": 3.367636892731812e-05, |
| "loss": 0.1692, |
| "mean_token_accuracy": 0.9460426717996597, |
| "step": 505 |
| }, |
| { |
| "epoch": 2.961876832844575, |
| "grad_norm": 0.965502377956891, |
| "learning_rate": 3.365024911385593e-05, |
| "loss": 0.1286, |
| "mean_token_accuracy": 0.963849164545536, |
| "step": 506 |
| }, |
| { |
| "epoch": 2.967741935483871, |
| "grad_norm": 1.0151333575737922, |
| "learning_rate": 3.362408701469469e-05, |
| "loss": 0.1506, |
| "mean_token_accuracy": 0.9497946873307228, |
| "step": 507 |
| }, |
| { |
| "epoch": 2.973607038123167, |
| "grad_norm": 1.2314813082134561, |
| "learning_rate": 3.359788272479225e-05, |
| "loss": 0.1796, |
| "mean_token_accuracy": 0.9487903341650963, |
| "step": 508 |
| }, |
| { |
| "epoch": 2.9794721407624634, |
| "grad_norm": 1.2284872298249723, |
| "learning_rate": 3.35716363392596e-05, |
| "loss": 0.1735, |
| "mean_token_accuracy": 0.946588970720768, |
| "step": 509 |
| }, |
| { |
| "epoch": 2.9853372434017595, |
| "grad_norm": 1.3942128527138467, |
| "learning_rate": 3.354534795336052e-05, |
| "loss": 0.2326, |
| "mean_token_accuracy": 0.9368415027856827, |
| "step": 510 |
| }, |
| { |
| "epoch": 2.9912023460410557, |
| "grad_norm": 1.0199724160589165, |
| "learning_rate": 3.351901766251123e-05, |
| "loss": 0.1612, |
| "mean_token_accuracy": 0.9494053423404694, |
| "step": 511 |
| }, |
| { |
| "epoch": 2.997067448680352, |
| "grad_norm": 1.4236896466420417, |
| "learning_rate": 3.349264556228006e-05, |
| "loss": 0.2099, |
| "mean_token_accuracy": 0.9403877630829811, |
| "step": 512 |
| }, |
| { |
| "epoch": 3.0, |
| "grad_norm": 1.4236896466420417, |
| "learning_rate": 3.3466231748387077e-05, |
| "loss": 0.2159, |
| "mean_token_accuracy": 0.929660826921463, |
| "step": 513 |
| }, |
| { |
| "epoch": 3.005865102639296, |
| "grad_norm": 1.917706001626474, |
| "learning_rate": 3.343977631670376e-05, |
| "loss": 0.0928, |
| "mean_token_accuracy": 0.9745519906282425, |
| "step": 514 |
| }, |
| { |
| "epoch": 3.0117302052785924, |
| "grad_norm": 0.9353206453623386, |
| "learning_rate": 3.341327936325264e-05, |
| "loss": 0.1087, |
| "mean_token_accuracy": 0.9702077433466911, |
| "step": 515 |
| }, |
| { |
| "epoch": 3.0175953079178885, |
| "grad_norm": 0.793270262682745, |
| "learning_rate": 3.338674098420695e-05, |
| "loss": 0.0927, |
| "mean_token_accuracy": 0.9723505601286888, |
| "step": 516 |
| }, |
| { |
| "epoch": 3.0234604105571847, |
| "grad_norm": 0.9442060709437615, |
| "learning_rate": 3.33601612758903e-05, |
| "loss": 0.1154, |
| "mean_token_accuracy": 0.9676948711276054, |
| "step": 517 |
| }, |
| { |
| "epoch": 3.029325513196481, |
| "grad_norm": 0.9089470810917948, |
| "learning_rate": 3.3333540334776286e-05, |
| "loss": 0.1085, |
| "mean_token_accuracy": 0.9652741998434067, |
| "step": 518 |
| }, |
| { |
| "epoch": 3.035190615835777, |
| "grad_norm": 0.9535344298120216, |
| "learning_rate": 3.330687825748818e-05, |
| "loss": 0.0966, |
| "mean_token_accuracy": 0.9707278311252594, |
| "step": 519 |
| }, |
| { |
| "epoch": 3.0410557184750733, |
| "grad_norm": 1.0173869551440817, |
| "learning_rate": 3.328017514079855e-05, |
| "loss": 0.119, |
| "mean_token_accuracy": 0.9670073837041855, |
| "step": 520 |
| }, |
| { |
| "epoch": 3.0469208211143695, |
| "grad_norm": 0.7792951102956577, |
| "learning_rate": 3.325343108162893e-05, |
| "loss": 0.0937, |
| "mean_token_accuracy": 0.9708864092826843, |
| "step": 521 |
| }, |
| { |
| "epoch": 3.0527859237536656, |
| "grad_norm": 0.945465442856844, |
| "learning_rate": 3.3226646177049446e-05, |
| "loss": 0.115, |
| "mean_token_accuracy": 0.9687144085764885, |
| "step": 522 |
| }, |
| { |
| "epoch": 3.058651026392962, |
| "grad_norm": 0.9843530411347534, |
| "learning_rate": 3.3199820524278485e-05, |
| "loss": 0.1165, |
| "mean_token_accuracy": 0.9642806574702263, |
| "step": 523 |
| }, |
| { |
| "epoch": 3.064516129032258, |
| "grad_norm": 1.084726474246644, |
| "learning_rate": 3.317295422068234e-05, |
| "loss": 0.1176, |
| "mean_token_accuracy": 0.965855173766613, |
| "step": 524 |
| }, |
| { |
| "epoch": 3.070381231671554, |
| "grad_norm": 0.9445207089649317, |
| "learning_rate": 3.314604736377484e-05, |
| "loss": 0.0873, |
| "mean_token_accuracy": 0.9730576723814011, |
| "step": 525 |
| }, |
| { |
| "epoch": 3.0762463343108504, |
| "grad_norm": 0.8091369892946971, |
| "learning_rate": 3.3119100051217005e-05, |
| "loss": 0.0891, |
| "mean_token_accuracy": 0.9751559272408485, |
| "step": 526 |
| }, |
| { |
| "epoch": 3.0821114369501466, |
| "grad_norm": 0.94656315451539, |
| "learning_rate": 3.3092112380816696e-05, |
| "loss": 0.104, |
| "mean_token_accuracy": 0.9682408720254898, |
| "step": 527 |
| }, |
| { |
| "epoch": 3.0879765395894427, |
| "grad_norm": 0.8332801420075987, |
| "learning_rate": 3.306508445052826e-05, |
| "loss": 0.1142, |
| "mean_token_accuracy": 0.9660920351743698, |
| "step": 528 |
| }, |
| { |
| "epoch": 3.093841642228739, |
| "grad_norm": 1.2155753046929505, |
| "learning_rate": 3.303801635845216e-05, |
| "loss": 0.11, |
| "mean_token_accuracy": 0.9683258086442947, |
| "step": 529 |
| }, |
| { |
| "epoch": 3.099706744868035, |
| "grad_norm": 1.0637098841625154, |
| "learning_rate": 3.301090820283465e-05, |
| "loss": 0.1197, |
| "mean_token_accuracy": 0.9640811383724213, |
| "step": 530 |
| }, |
| { |
| "epoch": 3.1055718475073313, |
| "grad_norm": 1.0473569018050832, |
| "learning_rate": 3.298376008206739e-05, |
| "loss": 0.111, |
| "mean_token_accuracy": 0.9675813242793083, |
| "step": 531 |
| }, |
| { |
| "epoch": 3.1114369501466275, |
| "grad_norm": 0.6922733863108391, |
| "learning_rate": 3.295657209468707e-05, |
| "loss": 0.0863, |
| "mean_token_accuracy": 0.9753805994987488, |
| "step": 532 |
| }, |
| { |
| "epoch": 3.1173020527859236, |
| "grad_norm": 0.922894718374272, |
| "learning_rate": 3.2929344339375125e-05, |
| "loss": 0.1141, |
| "mean_token_accuracy": 0.9667019098997116, |
| "step": 533 |
| }, |
| { |
| "epoch": 3.12316715542522, |
| "grad_norm": 1.0319575218914387, |
| "learning_rate": 3.290207691495731e-05, |
| "loss": 0.1078, |
| "mean_token_accuracy": 0.969107136130333, |
| "step": 534 |
| }, |
| { |
| "epoch": 3.129032258064516, |
| "grad_norm": 0.9278719667767502, |
| "learning_rate": 3.2874769920403355e-05, |
| "loss": 0.1026, |
| "mean_token_accuracy": 0.9660426154732704, |
| "step": 535 |
| }, |
| { |
| "epoch": 3.134897360703812, |
| "grad_norm": 0.7507543958848601, |
| "learning_rate": 3.2847423454826616e-05, |
| "loss": 0.0985, |
| "mean_token_accuracy": 0.9724163636565208, |
| "step": 536 |
| }, |
| { |
| "epoch": 3.1407624633431084, |
| "grad_norm": 0.9242090726097795, |
| "learning_rate": 3.2820037617483734e-05, |
| "loss": 0.1248, |
| "mean_token_accuracy": 0.9671787321567535, |
| "step": 537 |
| }, |
| { |
| "epoch": 3.1466275659824046, |
| "grad_norm": 1.0582856339832543, |
| "learning_rate": 3.2792612507774224e-05, |
| "loss": 0.1082, |
| "mean_token_accuracy": 0.9701619669795036, |
| "step": 538 |
| }, |
| { |
| "epoch": 3.1524926686217007, |
| "grad_norm": 0.7962224695055707, |
| "learning_rate": 3.2765148225240176e-05, |
| "loss": 0.1022, |
| "mean_token_accuracy": 0.9693987816572189, |
| "step": 539 |
| }, |
| { |
| "epoch": 3.158357771260997, |
| "grad_norm": 0.8913562259790775, |
| "learning_rate": 3.273764486956583e-05, |
| "loss": 0.1188, |
| "mean_token_accuracy": 0.9641912281513214, |
| "step": 540 |
| }, |
| { |
| "epoch": 3.164222873900293, |
| "grad_norm": 0.9391686716445263, |
| "learning_rate": 3.2710102540577256e-05, |
| "loss": 0.1015, |
| "mean_token_accuracy": 0.9703034535050392, |
| "step": 541 |
| }, |
| { |
| "epoch": 3.1700879765395893, |
| "grad_norm": 1.1425921994518553, |
| "learning_rate": 3.268252133824198e-05, |
| "loss": 0.1396, |
| "mean_token_accuracy": 0.9599046036601067, |
| "step": 542 |
| }, |
| { |
| "epoch": 3.1759530791788855, |
| "grad_norm": 0.9837980520671072, |
| "learning_rate": 3.2654901362668656e-05, |
| "loss": 0.105, |
| "mean_token_accuracy": 0.9691286087036133, |
| "step": 543 |
| }, |
| { |
| "epoch": 3.1818181818181817, |
| "grad_norm": 1.0478243618422738, |
| "learning_rate": 3.262724271410661e-05, |
| "loss": 0.1177, |
| "mean_token_accuracy": 0.964763417840004, |
| "step": 544 |
| }, |
| { |
| "epoch": 3.187683284457478, |
| "grad_norm": 1.051440350841896, |
| "learning_rate": 3.2599545492945584e-05, |
| "loss": 0.1281, |
| "mean_token_accuracy": 0.9659412503242493, |
| "step": 545 |
| }, |
| { |
| "epoch": 3.193548387096774, |
| "grad_norm": 1.2381984787570743, |
| "learning_rate": 3.257180979971529e-05, |
| "loss": 0.122, |
| "mean_token_accuracy": 0.96320890635252, |
| "step": 546 |
| }, |
| { |
| "epoch": 3.19941348973607, |
| "grad_norm": 0.9566730062260478, |
| "learning_rate": 3.25440357350851e-05, |
| "loss": 0.1227, |
| "mean_token_accuracy": 0.9624106585979462, |
| "step": 547 |
| }, |
| { |
| "epoch": 3.2052785923753664, |
| "grad_norm": 1.0303596937757689, |
| "learning_rate": 3.251622339986366e-05, |
| "loss": 0.1208, |
| "mean_token_accuracy": 0.9645057767629623, |
| "step": 548 |
| }, |
| { |
| "epoch": 3.2111436950146626, |
| "grad_norm": 1.1499267317362676, |
| "learning_rate": 3.24883728949985e-05, |
| "loss": 0.1289, |
| "mean_token_accuracy": 0.9617469310760498, |
| "step": 549 |
| }, |
| { |
| "epoch": 3.2170087976539588, |
| "grad_norm": 0.8579142698468588, |
| "learning_rate": 3.2460484321575714e-05, |
| "loss": 0.0921, |
| "mean_token_accuracy": 0.9694699496030807, |
| "step": 550 |
| }, |
| { |
| "epoch": 3.222873900293255, |
| "grad_norm": 1.2241087162872863, |
| "learning_rate": 3.2432557780819556e-05, |
| "loss": 0.0932, |
| "mean_token_accuracy": 0.9721159860491753, |
| "step": 551 |
| }, |
| { |
| "epoch": 3.228739002932551, |
| "grad_norm": 0.8167439933486885, |
| "learning_rate": 3.240459337409209e-05, |
| "loss": 0.1116, |
| "mean_token_accuracy": 0.9674685597419739, |
| "step": 552 |
| }, |
| { |
| "epoch": 3.2346041055718473, |
| "grad_norm": 0.7997732243833637, |
| "learning_rate": 3.237659120289282e-05, |
| "loss": 0.1017, |
| "mean_token_accuracy": 0.9701759144663811, |
| "step": 553 |
| }, |
| { |
| "epoch": 3.2404692082111435, |
| "grad_norm": 1.1095187159507571, |
| "learning_rate": 3.2348551368858315e-05, |
| "loss": 0.1156, |
| "mean_token_accuracy": 0.9673218578100204, |
| "step": 554 |
| }, |
| { |
| "epoch": 3.2463343108504397, |
| "grad_norm": 0.9606996629666116, |
| "learning_rate": 3.2320473973761845e-05, |
| "loss": 0.1153, |
| "mean_token_accuracy": 0.9676761701703072, |
| "step": 555 |
| }, |
| { |
| "epoch": 3.252199413489736, |
| "grad_norm": 0.9247316094180343, |
| "learning_rate": 3.229235911951303e-05, |
| "loss": 0.1153, |
| "mean_token_accuracy": 0.9680519327521324, |
| "step": 556 |
| }, |
| { |
| "epoch": 3.258064516129032, |
| "grad_norm": 1.1072521549049836, |
| "learning_rate": 3.2264206908157425e-05, |
| "loss": 0.101, |
| "mean_token_accuracy": 0.9698139801621437, |
| "step": 557 |
| }, |
| { |
| "epoch": 3.263929618768328, |
| "grad_norm": 0.8418708276056709, |
| "learning_rate": 3.2236017441876185e-05, |
| "loss": 0.1188, |
| "mean_token_accuracy": 0.9673982262611389, |
| "step": 558 |
| }, |
| { |
| "epoch": 3.2697947214076244, |
| "grad_norm": 1.0299813252012295, |
| "learning_rate": 3.220779082298569e-05, |
| "loss": 0.109, |
| "mean_token_accuracy": 0.9702173173427582, |
| "step": 559 |
| }, |
| { |
| "epoch": 3.2756598240469206, |
| "grad_norm": 1.0202554844724807, |
| "learning_rate": 3.2179527153937165e-05, |
| "loss": 0.1288, |
| "mean_token_accuracy": 0.9627135470509529, |
| "step": 560 |
| }, |
| { |
| "epoch": 3.281524926686217, |
| "grad_norm": 0.9077193846639583, |
| "learning_rate": 3.2151226537316315e-05, |
| "loss": 0.0963, |
| "mean_token_accuracy": 0.9713364169001579, |
| "step": 561 |
| }, |
| { |
| "epoch": 3.2873900293255134, |
| "grad_norm": 0.8778878752612806, |
| "learning_rate": 3.212288907584296e-05, |
| "loss": 0.103, |
| "mean_token_accuracy": 0.9681121036410332, |
| "step": 562 |
| }, |
| { |
| "epoch": 3.2932551319648096, |
| "grad_norm": 0.9509437842453761, |
| "learning_rate": 3.209451487237062e-05, |
| "loss": 0.1296, |
| "mean_token_accuracy": 0.9637468382716179, |
| "step": 563 |
| }, |
| { |
| "epoch": 3.2991202346041058, |
| "grad_norm": 1.0297542251177307, |
| "learning_rate": 3.206610402988621e-05, |
| "loss": 0.1095, |
| "mean_token_accuracy": 0.9680195823311806, |
| "step": 564 |
| }, |
| { |
| "epoch": 3.304985337243402, |
| "grad_norm": 0.927223963783926, |
| "learning_rate": 3.20376566515096e-05, |
| "loss": 0.0991, |
| "mean_token_accuracy": 0.9703445583581924, |
| "step": 565 |
| }, |
| { |
| "epoch": 3.310850439882698, |
| "grad_norm": 0.8239448328897864, |
| "learning_rate": 3.20091728404933e-05, |
| "loss": 0.0992, |
| "mean_token_accuracy": 0.9700045213103294, |
| "step": 566 |
| }, |
| { |
| "epoch": 3.3167155425219943, |
| "grad_norm": 0.9180503831666103, |
| "learning_rate": 3.1980652700222024e-05, |
| "loss": 0.105, |
| "mean_token_accuracy": 0.9704331159591675, |
| "step": 567 |
| }, |
| { |
| "epoch": 3.3225806451612905, |
| "grad_norm": 0.8526038871722106, |
| "learning_rate": 3.195209633421237e-05, |
| "loss": 0.1126, |
| "mean_token_accuracy": 0.9635952338576317, |
| "step": 568 |
| }, |
| { |
| "epoch": 3.3284457478005867, |
| "grad_norm": 0.9429157822338116, |
| "learning_rate": 3.192350384611242e-05, |
| "loss": 0.1249, |
| "mean_token_accuracy": 0.9621228873729706, |
| "step": 569 |
| }, |
| { |
| "epoch": 3.334310850439883, |
| "grad_norm": 1.0853666053637696, |
| "learning_rate": 3.1894875339701354e-05, |
| "loss": 0.1161, |
| "mean_token_accuracy": 0.9705541431903839, |
| "step": 570 |
| }, |
| { |
| "epoch": 3.340175953079179, |
| "grad_norm": 0.9698729643334536, |
| "learning_rate": 3.186621091888909e-05, |
| "loss": 0.1228, |
| "mean_token_accuracy": 0.9655315577983856, |
| "step": 571 |
| }, |
| { |
| "epoch": 3.346041055718475, |
| "grad_norm": 0.9453776118037494, |
| "learning_rate": 3.183751068771588e-05, |
| "loss": 0.1184, |
| "mean_token_accuracy": 0.9671064466238022, |
| "step": 572 |
| }, |
| { |
| "epoch": 3.3519061583577714, |
| "grad_norm": 0.9290593976617697, |
| "learning_rate": 3.180877475035199e-05, |
| "loss": 0.1112, |
| "mean_token_accuracy": 0.9667282104492188, |
| "step": 573 |
| }, |
| { |
| "epoch": 3.3577712609970676, |
| "grad_norm": 0.8475132554914343, |
| "learning_rate": 3.178000321109727e-05, |
| "loss": 0.1171, |
| "mean_token_accuracy": 0.9664184153079987, |
| "step": 574 |
| }, |
| { |
| "epoch": 3.3636363636363638, |
| "grad_norm": 0.9901954629417643, |
| "learning_rate": 3.175119617438078e-05, |
| "loss": 0.1193, |
| "mean_token_accuracy": 0.9652402922511101, |
| "step": 575 |
| }, |
| { |
| "epoch": 3.36950146627566, |
| "grad_norm": 1.0773481785229146, |
| "learning_rate": 3.172235374476043e-05, |
| "loss": 0.1095, |
| "mean_token_accuracy": 0.9663127958774567, |
| "step": 576 |
| }, |
| { |
| "epoch": 3.375366568914956, |
| "grad_norm": 0.8410303889573532, |
| "learning_rate": 3.169347602692259e-05, |
| "loss": 0.1155, |
| "mean_token_accuracy": 0.9649907350540161, |
| "step": 577 |
| }, |
| { |
| "epoch": 3.3812316715542523, |
| "grad_norm": 0.919889189853559, |
| "learning_rate": 3.166456312568171e-05, |
| "loss": 0.1066, |
| "mean_token_accuracy": 0.9651471823453903, |
| "step": 578 |
| }, |
| { |
| "epoch": 3.3870967741935485, |
| "grad_norm": 0.9847134009233571, |
| "learning_rate": 3.1635615145979955e-05, |
| "loss": 0.1325, |
| "mean_token_accuracy": 0.9620075672864914, |
| "step": 579 |
| }, |
| { |
| "epoch": 3.3929618768328447, |
| "grad_norm": 0.9140552479884303, |
| "learning_rate": 3.160663219288679e-05, |
| "loss": 0.0929, |
| "mean_token_accuracy": 0.9710179567337036, |
| "step": 580 |
| }, |
| { |
| "epoch": 3.398826979472141, |
| "grad_norm": 0.8655879512510697, |
| "learning_rate": 3.157761437159863e-05, |
| "loss": 0.1227, |
| "mean_token_accuracy": 0.9625556096434593, |
| "step": 581 |
| }, |
| { |
| "epoch": 3.404692082111437, |
| "grad_norm": 1.016148263569575, |
| "learning_rate": 3.1548561787438445e-05, |
| "loss": 0.1038, |
| "mean_token_accuracy": 0.970151960849762, |
| "step": 582 |
| }, |
| { |
| "epoch": 3.410557184750733, |
| "grad_norm": 0.9990010783150937, |
| "learning_rate": 3.15194745458554e-05, |
| "loss": 0.1094, |
| "mean_token_accuracy": 0.9681411162018776, |
| "step": 583 |
| }, |
| { |
| "epoch": 3.4164222873900294, |
| "grad_norm": 0.8982758800759956, |
| "learning_rate": 3.149035275242441e-05, |
| "loss": 0.1008, |
| "mean_token_accuracy": 0.970494419336319, |
| "step": 584 |
| }, |
| { |
| "epoch": 3.4222873900293256, |
| "grad_norm": 0.9411992221855227, |
| "learning_rate": 3.1461196512845834e-05, |
| "loss": 0.1134, |
| "mean_token_accuracy": 0.9663120433688164, |
| "step": 585 |
| }, |
| { |
| "epoch": 3.4281524926686218, |
| "grad_norm": 1.130870863841404, |
| "learning_rate": 3.143200593294504e-05, |
| "loss": 0.1154, |
| "mean_token_accuracy": 0.9686842858791351, |
| "step": 586 |
| }, |
| { |
| "epoch": 3.434017595307918, |
| "grad_norm": 1.266553022930829, |
| "learning_rate": 3.1402781118672065e-05, |
| "loss": 0.1313, |
| "mean_token_accuracy": 0.9647129997611046, |
| "step": 587 |
| }, |
| { |
| "epoch": 3.439882697947214, |
| "grad_norm": 1.0333390497650004, |
| "learning_rate": 3.137352217610115e-05, |
| "loss": 0.1119, |
| "mean_token_accuracy": 0.967375859618187, |
| "step": 588 |
| }, |
| { |
| "epoch": 3.4457478005865103, |
| "grad_norm": 0.8920677150256165, |
| "learning_rate": 3.1344229211430465e-05, |
| "loss": 0.1126, |
| "mean_token_accuracy": 0.96572595089674, |
| "step": 589 |
| }, |
| { |
| "epoch": 3.4516129032258065, |
| "grad_norm": 0.9100521404888532, |
| "learning_rate": 3.131490233098164e-05, |
| "loss": 0.099, |
| "mean_token_accuracy": 0.973532646894455, |
| "step": 590 |
| }, |
| { |
| "epoch": 3.4574780058651027, |
| "grad_norm": 1.0560831237985342, |
| "learning_rate": 3.1285541641199383e-05, |
| "loss": 0.119, |
| "mean_token_accuracy": 0.9654569253325462, |
| "step": 591 |
| }, |
| { |
| "epoch": 3.463343108504399, |
| "grad_norm": 0.9819382175267215, |
| "learning_rate": 3.1256147248651166e-05, |
| "loss": 0.1105, |
| "mean_token_accuracy": 0.9697766527533531, |
| "step": 592 |
| }, |
| { |
| "epoch": 3.469208211143695, |
| "grad_norm": 0.9456952920282133, |
| "learning_rate": 3.122671926002675e-05, |
| "loss": 0.1141, |
| "mean_token_accuracy": 0.962925061583519, |
| "step": 593 |
| }, |
| { |
| "epoch": 3.4750733137829912, |
| "grad_norm": 0.8886708651599997, |
| "learning_rate": 3.119725778213785e-05, |
| "loss": 0.119, |
| "mean_token_accuracy": 0.9643419906497002, |
| "step": 594 |
| }, |
| { |
| "epoch": 3.4809384164222874, |
| "grad_norm": 1.2042272320453875, |
| "learning_rate": 3.116776292191774e-05, |
| "loss": 0.1284, |
| "mean_token_accuracy": 0.9641223028302193, |
| "step": 595 |
| }, |
| { |
| "epoch": 3.4868035190615836, |
| "grad_norm": 0.868426298920179, |
| "learning_rate": 3.1138234786420834e-05, |
| "loss": 0.1075, |
| "mean_token_accuracy": 0.9682093411684036, |
| "step": 596 |
| }, |
| { |
| "epoch": 3.4926686217008798, |
| "grad_norm": 0.8636859419474582, |
| "learning_rate": 3.110867348282235e-05, |
| "loss": 0.1231, |
| "mean_token_accuracy": 0.9666341170668602, |
| "step": 597 |
| }, |
| { |
| "epoch": 3.498533724340176, |
| "grad_norm": 1.0945410380452534, |
| "learning_rate": 3.107907911841787e-05, |
| "loss": 0.1133, |
| "mean_token_accuracy": 0.9635356739163399, |
| "step": 598 |
| }, |
| { |
| "epoch": 3.504398826979472, |
| "grad_norm": 0.8736645011251645, |
| "learning_rate": 3.104945180062301e-05, |
| "loss": 0.1013, |
| "mean_token_accuracy": 0.9706991836428642, |
| "step": 599 |
| }, |
| { |
| "epoch": 3.5102639296187683, |
| "grad_norm": 0.8599394312237839, |
| "learning_rate": 3.1019791636972936e-05, |
| "loss": 0.1088, |
| "mean_token_accuracy": 0.9657791554927826, |
| "step": 600 |
| }, |
| { |
| "epoch": 3.5161290322580645, |
| "grad_norm": 0.9734042928382984, |
| "learning_rate": 3.099009873512208e-05, |
| "loss": 0.1147, |
| "mean_token_accuracy": 0.9690323546528816, |
| "step": 601 |
| }, |
| { |
| "epoch": 3.5219941348973607, |
| "grad_norm": 0.8671838676434842, |
| "learning_rate": 3.0960373202843685e-05, |
| "loss": 0.0987, |
| "mean_token_accuracy": 0.9714084416627884, |
| "step": 602 |
| }, |
| { |
| "epoch": 3.527859237536657, |
| "grad_norm": 1.0600541718141812, |
| "learning_rate": 3.093061514802943e-05, |
| "loss": 0.1223, |
| "mean_token_accuracy": 0.9637552127242088, |
| "step": 603 |
| }, |
| { |
| "epoch": 3.533724340175953, |
| "grad_norm": 0.9672322618677217, |
| "learning_rate": 3.090082467868901e-05, |
| "loss": 0.1004, |
| "mean_token_accuracy": 0.9703481644392014, |
| "step": 604 |
| }, |
| { |
| "epoch": 3.5395894428152492, |
| "grad_norm": 0.9215427402997913, |
| "learning_rate": 3.087100190294983e-05, |
| "loss": 0.1118, |
| "mean_token_accuracy": 0.9667570516467094, |
| "step": 605 |
| }, |
| { |
| "epoch": 3.5454545454545454, |
| "grad_norm": 1.044225936500776, |
| "learning_rate": 3.0841146929056505e-05, |
| "loss": 0.1282, |
| "mean_token_accuracy": 0.9646147862076759, |
| "step": 606 |
| }, |
| { |
| "epoch": 3.5513196480938416, |
| "grad_norm": 1.1406441411754127, |
| "learning_rate": 3.0811259865370535e-05, |
| "loss": 0.1021, |
| "mean_token_accuracy": 0.97073944658041, |
| "step": 607 |
| }, |
| { |
| "epoch": 3.557184750733138, |
| "grad_norm": 0.8384047045794155, |
| "learning_rate": 3.07813408203699e-05, |
| "loss": 0.1001, |
| "mean_token_accuracy": 0.9701420590281487, |
| "step": 608 |
| }, |
| { |
| "epoch": 3.563049853372434, |
| "grad_norm": 0.7890069517164189, |
| "learning_rate": 3.075138990264863e-05, |
| "loss": 0.1221, |
| "mean_token_accuracy": 0.9611668586730957, |
| "step": 609 |
| }, |
| { |
| "epoch": 3.56891495601173, |
| "grad_norm": 0.8128150056909983, |
| "learning_rate": 3.072140722091648e-05, |
| "loss": 0.0897, |
| "mean_token_accuracy": 0.9721631705760956, |
| "step": 610 |
| }, |
| { |
| "epoch": 3.5747800586510263, |
| "grad_norm": 0.982204128026469, |
| "learning_rate": 3.0691392883998455e-05, |
| "loss": 0.1357, |
| "mean_token_accuracy": 0.9621530324220657, |
| "step": 611 |
| }, |
| { |
| "epoch": 3.5806451612903225, |
| "grad_norm": 1.0337495257797795, |
| "learning_rate": 3.0661347000834496e-05, |
| "loss": 0.1034, |
| "mean_token_accuracy": 0.9698660597205162, |
| "step": 612 |
| }, |
| { |
| "epoch": 3.5865102639296187, |
| "grad_norm": 0.8802096358922299, |
| "learning_rate": 3.063126968047901e-05, |
| "loss": 0.1073, |
| "mean_token_accuracy": 0.9643291085958481, |
| "step": 613 |
| }, |
| { |
| "epoch": 3.592375366568915, |
| "grad_norm": 0.9070710629182922, |
| "learning_rate": 3.060116103210053e-05, |
| "loss": 0.0907, |
| "mean_token_accuracy": 0.9729605987668037, |
| "step": 614 |
| }, |
| { |
| "epoch": 3.598240469208211, |
| "grad_norm": 0.7858682826254871, |
| "learning_rate": 3.057102116498129e-05, |
| "loss": 0.1061, |
| "mean_token_accuracy": 0.9679286181926727, |
| "step": 615 |
| }, |
| { |
| "epoch": 3.6041055718475072, |
| "grad_norm": 1.116732342857861, |
| "learning_rate": 3.0540850188516826e-05, |
| "loss": 0.1317, |
| "mean_token_accuracy": 0.9623885974287987, |
| "step": 616 |
| }, |
| { |
| "epoch": 3.6099706744868034, |
| "grad_norm": 0.9723503508992479, |
| "learning_rate": 3.051064821221561e-05, |
| "loss": 0.095, |
| "mean_token_accuracy": 0.9739877283573151, |
| "step": 617 |
| }, |
| { |
| "epoch": 3.6158357771260996, |
| "grad_norm": 0.8932015441700736, |
| "learning_rate": 3.0480415345698606e-05, |
| "loss": 0.136, |
| "mean_token_accuracy": 0.958889864385128, |
| "step": 618 |
| }, |
| { |
| "epoch": 3.621700879765396, |
| "grad_norm": 0.978385970902145, |
| "learning_rate": 3.045015169869892e-05, |
| "loss": 0.1021, |
| "mean_token_accuracy": 0.9719918370246887, |
| "step": 619 |
| }, |
| { |
| "epoch": 3.627565982404692, |
| "grad_norm": 1.111054861659078, |
| "learning_rate": 3.0419857381061355e-05, |
| "loss": 0.1262, |
| "mean_token_accuracy": 0.9625189378857613, |
| "step": 620 |
| }, |
| { |
| "epoch": 3.633431085043988, |
| "grad_norm": 0.7988893400125326, |
| "learning_rate": 3.0389532502742066e-05, |
| "loss": 0.1131, |
| "mean_token_accuracy": 0.9654566794633865, |
| "step": 621 |
| }, |
| { |
| "epoch": 3.6392961876832843, |
| "grad_norm": 0.9734473777514459, |
| "learning_rate": 3.0359177173808104e-05, |
| "loss": 0.1251, |
| "mean_token_accuracy": 0.9642170071601868, |
| "step": 622 |
| }, |
| { |
| "epoch": 3.6451612903225805, |
| "grad_norm": 0.9400850428078926, |
| "learning_rate": 3.032879150443705e-05, |
| "loss": 0.114, |
| "mean_token_accuracy": 0.9686667993664742, |
| "step": 623 |
| }, |
| { |
| "epoch": 3.6510263929618767, |
| "grad_norm": 0.9535498861769731, |
| "learning_rate": 3.029837560491662e-05, |
| "loss": 0.096, |
| "mean_token_accuracy": 0.9681509435176849, |
| "step": 624 |
| }, |
| { |
| "epoch": 3.656891495601173, |
| "grad_norm": 0.938181261351189, |
| "learning_rate": 3.0267929585644236e-05, |
| "loss": 0.1285, |
| "mean_token_accuracy": 0.9615221172571182, |
| "step": 625 |
| }, |
| { |
| "epoch": 3.662756598240469, |
| "grad_norm": 0.9923272982279318, |
| "learning_rate": 3.0237453557126656e-05, |
| "loss": 0.1001, |
| "mean_token_accuracy": 0.9676861017942429, |
| "step": 626 |
| }, |
| { |
| "epoch": 3.6686217008797652, |
| "grad_norm": 0.8665048960994197, |
| "learning_rate": 3.020694762997956e-05, |
| "loss": 0.1054, |
| "mean_token_accuracy": 0.967039056122303, |
| "step": 627 |
| }, |
| { |
| "epoch": 3.6744868035190614, |
| "grad_norm": 0.8363872341156405, |
| "learning_rate": 3.017641191492714e-05, |
| "loss": 0.0958, |
| "mean_token_accuracy": 0.9716126248240471, |
| "step": 628 |
| }, |
| { |
| "epoch": 3.6803519061583576, |
| "grad_norm": 0.8150922734512929, |
| "learning_rate": 3.0145846522801703e-05, |
| "loss": 0.0956, |
| "mean_token_accuracy": 0.971979595720768, |
| "step": 629 |
| }, |
| { |
| "epoch": 3.686217008797654, |
| "grad_norm": 0.9190785469098514, |
| "learning_rate": 3.0115251564543287e-05, |
| "loss": 0.1333, |
| "mean_token_accuracy": 0.9590764716267586, |
| "step": 630 |
| }, |
| { |
| "epoch": 3.6920821114369504, |
| "grad_norm": 1.0778968802891915, |
| "learning_rate": 3.008462715119922e-05, |
| "loss": 0.1461, |
| "mean_token_accuracy": 0.9540571868419647, |
| "step": 631 |
| }, |
| { |
| "epoch": 3.6979472140762466, |
| "grad_norm": 1.135241704691624, |
| "learning_rate": 3.0053973393923768e-05, |
| "loss": 0.0949, |
| "mean_token_accuracy": 0.9684988856315613, |
| "step": 632 |
| }, |
| { |
| "epoch": 3.703812316715543, |
| "grad_norm": 0.8026943444777539, |
| "learning_rate": 3.0023290403977694e-05, |
| "loss": 0.1205, |
| "mean_token_accuracy": 0.9627582207322121, |
| "step": 633 |
| }, |
| { |
| "epoch": 3.709677419354839, |
| "grad_norm": 1.060469864983061, |
| "learning_rate": 2.9992578292727842e-05, |
| "loss": 0.1132, |
| "mean_token_accuracy": 0.965522937476635, |
| "step": 634 |
| }, |
| { |
| "epoch": 3.715542521994135, |
| "grad_norm": 0.8622809489621395, |
| "learning_rate": 2.9961837171646778e-05, |
| "loss": 0.1159, |
| "mean_token_accuracy": 0.967415414750576, |
| "step": 635 |
| }, |
| { |
| "epoch": 3.7214076246334313, |
| "grad_norm": 0.8840405653301492, |
| "learning_rate": 2.993106715231237e-05, |
| "loss": 0.1132, |
| "mean_token_accuracy": 0.9685205817222595, |
| "step": 636 |
| }, |
| { |
| "epoch": 3.7272727272727275, |
| "grad_norm": 1.1001761901330231, |
| "learning_rate": 2.9900268346407336e-05, |
| "loss": 0.1206, |
| "mean_token_accuracy": 0.9662887156009674, |
| "step": 637 |
| }, |
| { |
| "epoch": 3.7331378299120237, |
| "grad_norm": 0.9880058176105925, |
| "learning_rate": 2.986944086571893e-05, |
| "loss": 0.131, |
| "mean_token_accuracy": 0.9618512764573097, |
| "step": 638 |
| }, |
| { |
| "epoch": 3.73900293255132, |
| "grad_norm": 0.9708212836272636, |
| "learning_rate": 2.983858482213843e-05, |
| "loss": 0.0967, |
| "mean_token_accuracy": 0.9714419692754745, |
| "step": 639 |
| }, |
| { |
| "epoch": 3.744868035190616, |
| "grad_norm": 0.7569383879074512, |
| "learning_rate": 2.9807700327660834e-05, |
| "loss": 0.1072, |
| "mean_token_accuracy": 0.9683928042650223, |
| "step": 640 |
| }, |
| { |
| "epoch": 3.7507331378299122, |
| "grad_norm": 0.9030504868168383, |
| "learning_rate": 2.977678749438437e-05, |
| "loss": 0.1194, |
| "mean_token_accuracy": 0.9649059996008873, |
| "step": 641 |
| }, |
| { |
| "epoch": 3.7565982404692084, |
| "grad_norm": 1.0884667040903802, |
| "learning_rate": 2.9745846434510146e-05, |
| "loss": 0.1105, |
| "mean_token_accuracy": 0.9691413938999176, |
| "step": 642 |
| }, |
| { |
| "epoch": 3.7624633431085046, |
| "grad_norm": 0.9753513539536118, |
| "learning_rate": 2.9714877260341705e-05, |
| "loss": 0.1186, |
| "mean_token_accuracy": 0.9606969803571701, |
| "step": 643 |
| }, |
| { |
| "epoch": 3.768328445747801, |
| "grad_norm": 0.7492296526886614, |
| "learning_rate": 2.9683880084284648e-05, |
| "loss": 0.077, |
| "mean_token_accuracy": 0.9752858132123947, |
| "step": 644 |
| }, |
| { |
| "epoch": 3.774193548387097, |
| "grad_norm": 0.8541278759879399, |
| "learning_rate": 2.96528550188462e-05, |
| "loss": 0.1225, |
| "mean_token_accuracy": 0.9665696918964386, |
| "step": 645 |
| }, |
| { |
| "epoch": 3.780058651026393, |
| "grad_norm": 0.8695425380218671, |
| "learning_rate": 2.962180217663483e-05, |
| "loss": 0.1141, |
| "mean_token_accuracy": 0.9651920199394226, |
| "step": 646 |
| }, |
| { |
| "epoch": 3.7859237536656893, |
| "grad_norm": 0.965732138124322, |
| "learning_rate": 2.95907216703598e-05, |
| "loss": 0.1194, |
| "mean_token_accuracy": 0.9652410075068474, |
| "step": 647 |
| }, |
| { |
| "epoch": 3.7917888563049855, |
| "grad_norm": 1.0337349786728662, |
| "learning_rate": 2.9559613612830797e-05, |
| "loss": 0.1222, |
| "mean_token_accuracy": 0.9637459143996239, |
| "step": 648 |
| }, |
| { |
| "epoch": 3.7976539589442817, |
| "grad_norm": 0.8407085586625942, |
| "learning_rate": 2.952847811695751e-05, |
| "loss": 0.1065, |
| "mean_token_accuracy": 0.9705112278461456, |
| "step": 649 |
| }, |
| { |
| "epoch": 3.803519061583578, |
| "grad_norm": 0.7987034943568222, |
| "learning_rate": 2.9497315295749218e-05, |
| "loss": 0.1165, |
| "mean_token_accuracy": 0.9658530652523041, |
| "step": 650 |
| }, |
| { |
| "epoch": 3.809384164222874, |
| "grad_norm": 0.9905673663074058, |
| "learning_rate": 2.9466125262314368e-05, |
| "loss": 0.1365, |
| "mean_token_accuracy": 0.9607385098934174, |
| "step": 651 |
| }, |
| { |
| "epoch": 3.8152492668621703, |
| "grad_norm": 0.8431989173157322, |
| "learning_rate": 2.9434908129860193e-05, |
| "loss": 0.1026, |
| "mean_token_accuracy": 0.9705355390906334, |
| "step": 652 |
| }, |
| { |
| "epoch": 3.8211143695014664, |
| "grad_norm": 0.9761533220193974, |
| "learning_rate": 2.9403664011692276e-05, |
| "loss": 0.1341, |
| "mean_token_accuracy": 0.9603022783994675, |
| "step": 653 |
| }, |
| { |
| "epoch": 3.8269794721407626, |
| "grad_norm": 1.0057652946436697, |
| "learning_rate": 2.9372393021214134e-05, |
| "loss": 0.138, |
| "mean_token_accuracy": 0.9568366184830666, |
| "step": 654 |
| }, |
| { |
| "epoch": 3.832844574780059, |
| "grad_norm": 1.1193327300180262, |
| "learning_rate": 2.9341095271926842e-05, |
| "loss": 0.1083, |
| "mean_token_accuracy": 0.9681970700621605, |
| "step": 655 |
| }, |
| { |
| "epoch": 3.838709677419355, |
| "grad_norm": 1.211332723198081, |
| "learning_rate": 2.930977087742859e-05, |
| "loss": 0.1119, |
| "mean_token_accuracy": 0.9678284898400307, |
| "step": 656 |
| }, |
| { |
| "epoch": 3.844574780058651, |
| "grad_norm": 0.9825614982183972, |
| "learning_rate": 2.9278419951414277e-05, |
| "loss": 0.1261, |
| "mean_token_accuracy": 0.9617257192730904, |
| "step": 657 |
| }, |
| { |
| "epoch": 3.8504398826979473, |
| "grad_norm": 0.8315738446465553, |
| "learning_rate": 2.9247042607675105e-05, |
| "loss": 0.1169, |
| "mean_token_accuracy": 0.9657503962516785, |
| "step": 658 |
| }, |
| { |
| "epoch": 3.8563049853372435, |
| "grad_norm": 0.8775305493698721, |
| "learning_rate": 2.9215638960098164e-05, |
| "loss": 0.0755, |
| "mean_token_accuracy": 0.9759255200624466, |
| "step": 659 |
| }, |
| { |
| "epoch": 3.8621700879765397, |
| "grad_norm": 0.6996032422889692, |
| "learning_rate": 2.9184209122665996e-05, |
| "loss": 0.1072, |
| "mean_token_accuracy": 0.9670997187495232, |
| "step": 660 |
| }, |
| { |
| "epoch": 3.868035190615836, |
| "grad_norm": 0.8460534329988707, |
| "learning_rate": 2.915275320945623e-05, |
| "loss": 0.122, |
| "mean_token_accuracy": 0.9646456241607666, |
| "step": 661 |
| }, |
| { |
| "epoch": 3.873900293255132, |
| "grad_norm": 1.1089879954428397, |
| "learning_rate": 2.9121271334641127e-05, |
| "loss": 0.1148, |
| "mean_token_accuracy": 0.9666710719466209, |
| "step": 662 |
| }, |
| { |
| "epoch": 3.8797653958944283, |
| "grad_norm": 0.9487108641230017, |
| "learning_rate": 2.908976361248717e-05, |
| "loss": 0.1017, |
| "mean_token_accuracy": 0.9736130684614182, |
| "step": 663 |
| }, |
| { |
| "epoch": 3.8856304985337244, |
| "grad_norm": 0.701535256126763, |
| "learning_rate": 2.9058230157354674e-05, |
| "loss": 0.1162, |
| "mean_token_accuracy": 0.963954895734787, |
| "step": 664 |
| }, |
| { |
| "epoch": 3.8914956011730206, |
| "grad_norm": 1.2184520006523096, |
| "learning_rate": 2.902667108369734e-05, |
| "loss": 0.1125, |
| "mean_token_accuracy": 0.9652879014611244, |
| "step": 665 |
| }, |
| { |
| "epoch": 3.897360703812317, |
| "grad_norm": 1.050805435187101, |
| "learning_rate": 2.8995086506061862e-05, |
| "loss": 0.1169, |
| "mean_token_accuracy": 0.9657002538442612, |
| "step": 666 |
| }, |
| { |
| "epoch": 3.903225806451613, |
| "grad_norm": 1.0753573344224259, |
| "learning_rate": 2.896347653908749e-05, |
| "loss": 0.1065, |
| "mean_token_accuracy": 0.9706609547138214, |
| "step": 667 |
| }, |
| { |
| "epoch": 3.909090909090909, |
| "grad_norm": 0.6759719386032113, |
| "learning_rate": 2.8931841297505657e-05, |
| "loss": 0.1053, |
| "mean_token_accuracy": 0.9689249470829964, |
| "step": 668 |
| }, |
| { |
| "epoch": 3.9149560117302054, |
| "grad_norm": 0.7879426759278125, |
| "learning_rate": 2.8900180896139503e-05, |
| "loss": 0.0825, |
| "mean_token_accuracy": 0.9760407134890556, |
| "step": 669 |
| }, |
| { |
| "epoch": 3.9208211143695015, |
| "grad_norm": 0.8961566450949507, |
| "learning_rate": 2.8868495449903498e-05, |
| "loss": 0.0891, |
| "mean_token_accuracy": 0.9749082326889038, |
| "step": 670 |
| }, |
| { |
| "epoch": 3.9266862170087977, |
| "grad_norm": 0.7145724309301391, |
| "learning_rate": 2.8836785073803014e-05, |
| "loss": 0.0968, |
| "mean_token_accuracy": 0.9699864313006401, |
| "step": 671 |
| }, |
| { |
| "epoch": 3.932551319648094, |
| "grad_norm": 0.7112042624615512, |
| "learning_rate": 2.880504988293391e-05, |
| "loss": 0.1009, |
| "mean_token_accuracy": 0.9703424945473671, |
| "step": 672 |
| }, |
| { |
| "epoch": 3.93841642228739, |
| "grad_norm": 0.8598718711890393, |
| "learning_rate": 2.8773289992482115e-05, |
| "loss": 0.106, |
| "mean_token_accuracy": 0.9680499285459518, |
| "step": 673 |
| }, |
| { |
| "epoch": 3.9442815249266863, |
| "grad_norm": 0.9698263823669294, |
| "learning_rate": 2.87415055177232e-05, |
| "loss": 0.1056, |
| "mean_token_accuracy": 0.9698052480816841, |
| "step": 674 |
| }, |
| { |
| "epoch": 3.9501466275659824, |
| "grad_norm": 0.8335613687508117, |
| "learning_rate": 2.870969657402197e-05, |
| "loss": 0.1152, |
| "mean_token_accuracy": 0.9647799134254456, |
| "step": 675 |
| }, |
| { |
| "epoch": 3.9560117302052786, |
| "grad_norm": 1.2113603594671711, |
| "learning_rate": 2.867786327683205e-05, |
| "loss": 0.1552, |
| "mean_token_accuracy": 0.9564560130238533, |
| "step": 676 |
| }, |
| { |
| "epoch": 3.961876832844575, |
| "grad_norm": 0.9088547489871039, |
| "learning_rate": 2.864600574169545e-05, |
| "loss": 0.1184, |
| "mean_token_accuracy": 0.9664463996887207, |
| "step": 677 |
| }, |
| { |
| "epoch": 3.967741935483871, |
| "grad_norm": 1.050048176179559, |
| "learning_rate": 2.861412408424216e-05, |
| "loss": 0.1118, |
| "mean_token_accuracy": 0.966325081884861, |
| "step": 678 |
| }, |
| { |
| "epoch": 3.973607038123167, |
| "grad_norm": 0.994917923182441, |
| "learning_rate": 2.8582218420189706e-05, |
| "loss": 0.1158, |
| "mean_token_accuracy": 0.9656457379460335, |
| "step": 679 |
| }, |
| { |
| "epoch": 3.9794721407624634, |
| "grad_norm": 1.0408318438711954, |
| "learning_rate": 2.855028886534278e-05, |
| "loss": 0.1272, |
| "mean_token_accuracy": 0.9610341414809227, |
| "step": 680 |
| }, |
| { |
| "epoch": 3.9853372434017595, |
| "grad_norm": 0.9371789995160021, |
| "learning_rate": 2.851833553559276e-05, |
| "loss": 0.1091, |
| "mean_token_accuracy": 0.966740570962429, |
| "step": 681 |
| }, |
| { |
| "epoch": 3.9912023460410557, |
| "grad_norm": 0.9273694196009427, |
| "learning_rate": 2.848635854691733e-05, |
| "loss": 0.1184, |
| "mean_token_accuracy": 0.9653659835457802, |
| "step": 682 |
| }, |
| { |
| "epoch": 3.997067448680352, |
| "grad_norm": 0.7340375512532604, |
| "learning_rate": 2.8454358015380046e-05, |
| "loss": 0.088, |
| "mean_token_accuracy": 0.9720573499798775, |
| "step": 683 |
| }, |
| { |
| "epoch": 4.0, |
| "grad_norm": 1.2796567322551584, |
| "learning_rate": 2.8422334057129913e-05, |
| "loss": 0.1123, |
| "mean_token_accuracy": 0.9657177776098251, |
| "step": 684 |
| }, |
| { |
| "epoch": 4.005865102639296, |
| "grad_norm": 0.5814062084412526, |
| "learning_rate": 2.8390286788400967e-05, |
| "loss": 0.0807, |
| "mean_token_accuracy": 0.973865695297718, |
| "step": 685 |
| }, |
| { |
| "epoch": 4.011730205278592, |
| "grad_norm": 0.6533584141705436, |
| "learning_rate": 2.8358216325511847e-05, |
| "loss": 0.0723, |
| "mean_token_accuracy": 0.9766295105218887, |
| "step": 686 |
| }, |
| { |
| "epoch": 4.0175953079178885, |
| "grad_norm": 0.9162087220426504, |
| "learning_rate": 2.832612278486538e-05, |
| "loss": 0.1061, |
| "mean_token_accuracy": 0.9709026291966438, |
| "step": 687 |
| }, |
| { |
| "epoch": 4.023460410557185, |
| "grad_norm": 0.7375794680352793, |
| "learning_rate": 2.8294006282948165e-05, |
| "loss": 0.0891, |
| "mean_token_accuracy": 0.976003848016262, |
| "step": 688 |
| }, |
| { |
| "epoch": 4.029325513196481, |
| "grad_norm": 0.7494579181986135, |
| "learning_rate": 2.8261866936330123e-05, |
| "loss": 0.0816, |
| "mean_token_accuracy": 0.9738617315888405, |
| "step": 689 |
| }, |
| { |
| "epoch": 4.035190615835777, |
| "grad_norm": 0.6929603989933999, |
| "learning_rate": 2.8229704861664113e-05, |
| "loss": 0.0801, |
| "mean_token_accuracy": 0.9780551716685295, |
| "step": 690 |
| }, |
| { |
| "epoch": 4.041055718475073, |
| "grad_norm": 0.7164876946150348, |
| "learning_rate": 2.8197520175685462e-05, |
| "loss": 0.0788, |
| "mean_token_accuracy": 0.9771693646907806, |
| "step": 691 |
| }, |
| { |
| "epoch": 4.0469208211143695, |
| "grad_norm": 0.8679669696761695, |
| "learning_rate": 2.8165312995211596e-05, |
| "loss": 0.0799, |
| "mean_token_accuracy": 0.9789220467209816, |
| "step": 692 |
| }, |
| { |
| "epoch": 4.052785923753666, |
| "grad_norm": 0.5785509783140412, |
| "learning_rate": 2.813308343714156e-05, |
| "loss": 0.0706, |
| "mean_token_accuracy": 0.9771093800663948, |
| "step": 693 |
| }, |
| { |
| "epoch": 4.058651026392962, |
| "grad_norm": 0.7587374823959739, |
| "learning_rate": 2.810083161845564e-05, |
| "loss": 0.09, |
| "mean_token_accuracy": 0.9711701348423958, |
| "step": 694 |
| }, |
| { |
| "epoch": 4.064516129032258, |
| "grad_norm": 0.8604246722345931, |
| "learning_rate": 2.8068557656214913e-05, |
| "loss": 0.0861, |
| "mean_token_accuracy": 0.9758541658520699, |
| "step": 695 |
| }, |
| { |
| "epoch": 4.070381231671554, |
| "grad_norm": 0.6621420685564046, |
| "learning_rate": 2.8036261667560826e-05, |
| "loss": 0.0766, |
| "mean_token_accuracy": 0.9776150584220886, |
| "step": 696 |
| }, |
| { |
| "epoch": 4.07624633431085, |
| "grad_norm": 0.7637582164149367, |
| "learning_rate": 2.8003943769714776e-05, |
| "loss": 0.0956, |
| "mean_token_accuracy": 0.9732154309749603, |
| "step": 697 |
| }, |
| { |
| "epoch": 4.0821114369501466, |
| "grad_norm": 1.0199977041425903, |
| "learning_rate": 2.7971604079977673e-05, |
| "loss": 0.1004, |
| "mean_token_accuracy": 0.9695825353264809, |
| "step": 698 |
| }, |
| { |
| "epoch": 4.087976539589443, |
| "grad_norm": 0.750195052728083, |
| "learning_rate": 2.793924271572954e-05, |
| "loss": 0.0785, |
| "mean_token_accuracy": 0.975655235350132, |
| "step": 699 |
| }, |
| { |
| "epoch": 4.093841642228739, |
| "grad_norm": 0.6454840574121126, |
| "learning_rate": 2.7906859794429047e-05, |
| "loss": 0.0812, |
| "mean_token_accuracy": 0.9726183488965034, |
| "step": 700 |
| }, |
| { |
| "epoch": 4.099706744868035, |
| "grad_norm": 0.7783150151101518, |
| "learning_rate": 2.787445543361313e-05, |
| "loss": 0.0801, |
| "mean_token_accuracy": 0.9766353219747543, |
| "step": 701 |
| }, |
| { |
| "epoch": 4.105571847507331, |
| "grad_norm": 0.8354381215669707, |
| "learning_rate": 2.7842029750896525e-05, |
| "loss": 0.091, |
| "mean_token_accuracy": 0.9731776043772697, |
| "step": 702 |
| }, |
| { |
| "epoch": 4.1114369501466275, |
| "grad_norm": 0.8831663612555645, |
| "learning_rate": 2.7809582863971373e-05, |
| "loss": 0.0903, |
| "mean_token_accuracy": 0.9750881195068359, |
| "step": 703 |
| }, |
| { |
| "epoch": 4.117302052785924, |
| "grad_norm": 0.8696118136211701, |
| "learning_rate": 2.777711489060676e-05, |
| "loss": 0.0903, |
| "mean_token_accuracy": 0.971781887114048, |
| "step": 704 |
| }, |
| { |
| "epoch": 4.12316715542522, |
| "grad_norm": 0.6913327616601556, |
| "learning_rate": 2.7744625948648316e-05, |
| "loss": 0.0735, |
| "mean_token_accuracy": 0.9773849919438362, |
| "step": 705 |
| }, |
| { |
| "epoch": 4.129032258064516, |
| "grad_norm": 0.7555233699938674, |
| "learning_rate": 2.7712116156017783e-05, |
| "loss": 0.0843, |
| "mean_token_accuracy": 0.9777982458472252, |
| "step": 706 |
| }, |
| { |
| "epoch": 4.134897360703812, |
| "grad_norm": 0.9696476917690434, |
| "learning_rate": 2.7679585630712585e-05, |
| "loss": 0.0975, |
| "mean_token_accuracy": 0.9709056839346886, |
| "step": 707 |
| }, |
| { |
| "epoch": 4.140762463343108, |
| "grad_norm": 0.5938696022344896, |
| "learning_rate": 2.764703449080538e-05, |
| "loss": 0.0811, |
| "mean_token_accuracy": 0.9765899106860161, |
| "step": 708 |
| }, |
| { |
| "epoch": 4.146627565982405, |
| "grad_norm": 0.9067730169164242, |
| "learning_rate": 2.761446285444366e-05, |
| "loss": 0.0987, |
| "mean_token_accuracy": 0.9722139462828636, |
| "step": 709 |
| }, |
| { |
| "epoch": 4.152492668621701, |
| "grad_norm": 0.639869651782152, |
| "learning_rate": 2.758187083984931e-05, |
| "loss": 0.0679, |
| "mean_token_accuracy": 0.9808940887451172, |
| "step": 710 |
| }, |
| { |
| "epoch": 4.158357771260997, |
| "grad_norm": 0.783349736332696, |
| "learning_rate": 2.754925856531819e-05, |
| "loss": 0.1062, |
| "mean_token_accuracy": 0.9705143421888351, |
| "step": 711 |
| }, |
| { |
| "epoch": 4.164222873900293, |
| "grad_norm": 1.7638873091647072, |
| "learning_rate": 2.7516626149219678e-05, |
| "loss": 0.0963, |
| "mean_token_accuracy": 0.9750856310129166, |
| "step": 712 |
| }, |
| { |
| "epoch": 4.170087976539589, |
| "grad_norm": 0.6835305264635491, |
| "learning_rate": 2.7483973709996267e-05, |
| "loss": 0.0873, |
| "mean_token_accuracy": 0.9742805510759354, |
| "step": 713 |
| }, |
| { |
| "epoch": 4.1759530791788855, |
| "grad_norm": 0.7878929380998676, |
| "learning_rate": 2.7451301366163116e-05, |
| "loss": 0.0978, |
| "mean_token_accuracy": 0.9704447388648987, |
| "step": 714 |
| }, |
| { |
| "epoch": 4.181818181818182, |
| "grad_norm": 0.6196814435565455, |
| "learning_rate": 2.741860923630765e-05, |
| "loss": 0.0733, |
| "mean_token_accuracy": 0.9794884473085403, |
| "step": 715 |
| }, |
| { |
| "epoch": 4.187683284457478, |
| "grad_norm": 0.7265891736306469, |
| "learning_rate": 2.7385897439089086e-05, |
| "loss": 0.0862, |
| "mean_token_accuracy": 0.9740516096353531, |
| "step": 716 |
| }, |
| { |
| "epoch": 4.193548387096774, |
| "grad_norm": 0.9562760180488723, |
| "learning_rate": 2.735316609323804e-05, |
| "loss": 0.1026, |
| "mean_token_accuracy": 0.9694742858409882, |
| "step": 717 |
| }, |
| { |
| "epoch": 4.19941348973607, |
| "grad_norm": 0.7996872026272548, |
| "learning_rate": 2.7320415317556085e-05, |
| "loss": 0.0878, |
| "mean_token_accuracy": 0.9754326492547989, |
| "step": 718 |
| }, |
| { |
| "epoch": 4.205278592375366, |
| "grad_norm": 0.6396887850496302, |
| "learning_rate": 2.72876452309153e-05, |
| "loss": 0.071, |
| "mean_token_accuracy": 0.9772866442799568, |
| "step": 719 |
| }, |
| { |
| "epoch": 4.211143695014663, |
| "grad_norm": 0.7719783389407485, |
| "learning_rate": 2.7254855952257867e-05, |
| "loss": 0.0898, |
| "mean_token_accuracy": 0.9726490750908852, |
| "step": 720 |
| }, |
| { |
| "epoch": 4.217008797653959, |
| "grad_norm": 1.0644091045972202, |
| "learning_rate": 2.7222047600595626e-05, |
| "loss": 0.1087, |
| "mean_token_accuracy": 0.9672816544771194, |
| "step": 721 |
| }, |
| { |
| "epoch": 4.222873900293255, |
| "grad_norm": 0.7154307567634154, |
| "learning_rate": 2.718922029500965e-05, |
| "loss": 0.084, |
| "mean_token_accuracy": 0.974772721529007, |
| "step": 722 |
| }, |
| { |
| "epoch": 4.228739002932551, |
| "grad_norm": 0.6908956165564543, |
| "learning_rate": 2.7156374154649787e-05, |
| "loss": 0.0815, |
| "mean_token_accuracy": 0.973622277379036, |
| "step": 723 |
| }, |
| { |
| "epoch": 4.234604105571847, |
| "grad_norm": 0.7365960380961718, |
| "learning_rate": 2.7123509298734267e-05, |
| "loss": 0.0858, |
| "mean_token_accuracy": 0.972634956240654, |
| "step": 724 |
| }, |
| { |
| "epoch": 4.2404692082111435, |
| "grad_norm": 0.8876489162788943, |
| "learning_rate": 2.7090625846549247e-05, |
| "loss": 0.0949, |
| "mean_token_accuracy": 0.972042515873909, |
| "step": 725 |
| }, |
| { |
| "epoch": 4.24633431085044, |
| "grad_norm": 0.8351301909607796, |
| "learning_rate": 2.705772391744837e-05, |
| "loss": 0.0912, |
| "mean_token_accuracy": 0.9738112688064575, |
| "step": 726 |
| }, |
| { |
| "epoch": 4.252199413489736, |
| "grad_norm": 0.7667250445002752, |
| "learning_rate": 2.7024803630852362e-05, |
| "loss": 0.0872, |
| "mean_token_accuracy": 0.9735923185944557, |
| "step": 727 |
| }, |
| { |
| "epoch": 4.258064516129032, |
| "grad_norm": 0.9337786078065189, |
| "learning_rate": 2.699186510624856e-05, |
| "loss": 0.1078, |
| "mean_token_accuracy": 0.969543345272541, |
| "step": 728 |
| }, |
| { |
| "epoch": 4.263929618768328, |
| "grad_norm": 0.7401750526879216, |
| "learning_rate": 2.6958908463190506e-05, |
| "loss": 0.0922, |
| "mean_token_accuracy": 0.9721928238868713, |
| "step": 729 |
| }, |
| { |
| "epoch": 4.269794721407624, |
| "grad_norm": 0.8159479602529707, |
| "learning_rate": 2.6925933821297497e-05, |
| "loss": 0.0906, |
| "mean_token_accuracy": 0.9723092764616013, |
| "step": 730 |
| }, |
| { |
| "epoch": 4.275659824046921, |
| "grad_norm": 0.7863328763793721, |
| "learning_rate": 2.6892941300254176e-05, |
| "loss": 0.0877, |
| "mean_token_accuracy": 0.9762269631028175, |
| "step": 731 |
| }, |
| { |
| "epoch": 4.281524926686217, |
| "grad_norm": 0.7226575436605356, |
| "learning_rate": 2.685993101981007e-05, |
| "loss": 0.0842, |
| "mean_token_accuracy": 0.9757591262459755, |
| "step": 732 |
| }, |
| { |
| "epoch": 4.287390029325513, |
| "grad_norm": 0.6717494608206148, |
| "learning_rate": 2.6826903099779157e-05, |
| "loss": 0.0814, |
| "mean_token_accuracy": 0.9769570678472519, |
| "step": 733 |
| }, |
| { |
| "epoch": 4.293255131964809, |
| "grad_norm": 0.8798299785191601, |
| "learning_rate": 2.679385766003945e-05, |
| "loss": 0.0998, |
| "mean_token_accuracy": 0.9717613831162453, |
| "step": 734 |
| }, |
| { |
| "epoch": 4.299120234604105, |
| "grad_norm": 0.6860539528931006, |
| "learning_rate": 2.676079482053255e-05, |
| "loss": 0.0919, |
| "mean_token_accuracy": 0.9732666462659836, |
| "step": 735 |
| }, |
| { |
| "epoch": 4.3049853372434015, |
| "grad_norm": 0.7965978126466666, |
| "learning_rate": 2.6727714701263212e-05, |
| "loss": 0.0911, |
| "mean_token_accuracy": 0.9728346392512321, |
| "step": 736 |
| }, |
| { |
| "epoch": 4.310850439882698, |
| "grad_norm": 0.6745321116193526, |
| "learning_rate": 2.669461742229891e-05, |
| "loss": 0.0872, |
| "mean_token_accuracy": 0.9757064208388329, |
| "step": 737 |
| }, |
| { |
| "epoch": 4.316715542521994, |
| "grad_norm": 0.7052793622815522, |
| "learning_rate": 2.6661503103769404e-05, |
| "loss": 0.0773, |
| "mean_token_accuracy": 0.9765629544854164, |
| "step": 738 |
| }, |
| { |
| "epoch": 4.32258064516129, |
| "grad_norm": 0.8439535536764042, |
| "learning_rate": 2.6628371865866286e-05, |
| "loss": 0.0976, |
| "mean_token_accuracy": 0.9718929752707481, |
| "step": 739 |
| }, |
| { |
| "epoch": 4.328445747800586, |
| "grad_norm": 0.7590924148224023, |
| "learning_rate": 2.6595223828842578e-05, |
| "loss": 0.0942, |
| "mean_token_accuracy": 0.9718786254525185, |
| "step": 740 |
| }, |
| { |
| "epoch": 4.334310850439882, |
| "grad_norm": 0.7115452306720939, |
| "learning_rate": 2.6562059113012253e-05, |
| "loss": 0.0842, |
| "mean_token_accuracy": 0.9750241562724113, |
| "step": 741 |
| }, |
| { |
| "epoch": 4.340175953079179, |
| "grad_norm": 0.5307398800874134, |
| "learning_rate": 2.6528877838749853e-05, |
| "loss": 0.0699, |
| "mean_token_accuracy": 0.9786670580506325, |
| "step": 742 |
| }, |
| { |
| "epoch": 4.346041055718475, |
| "grad_norm": 0.7855965606038025, |
| "learning_rate": 2.6495680126489984e-05, |
| "loss": 0.0878, |
| "mean_token_accuracy": 0.9722012206912041, |
| "step": 743 |
| }, |
| { |
| "epoch": 4.351906158357771, |
| "grad_norm": 0.8072504806635915, |
| "learning_rate": 2.6462466096726954e-05, |
| "loss": 0.1004, |
| "mean_token_accuracy": 0.9716598242521286, |
| "step": 744 |
| }, |
| { |
| "epoch": 4.357771260997067, |
| "grad_norm": 0.7450796282399247, |
| "learning_rate": 2.6429235870014256e-05, |
| "loss": 0.0853, |
| "mean_token_accuracy": 0.9746398106217384, |
| "step": 745 |
| }, |
| { |
| "epoch": 4.363636363636363, |
| "grad_norm": 0.8608235347633818, |
| "learning_rate": 2.639598956696421e-05, |
| "loss": 0.0979, |
| "mean_token_accuracy": 0.9726268425583839, |
| "step": 746 |
| }, |
| { |
| "epoch": 4.3695014662756595, |
| "grad_norm": 0.6571077470596837, |
| "learning_rate": 2.6362727308247458e-05, |
| "loss": 0.0809, |
| "mean_token_accuracy": 0.9742465242743492, |
| "step": 747 |
| }, |
| { |
| "epoch": 4.375366568914956, |
| "grad_norm": 0.7732181445332282, |
| "learning_rate": 2.6329449214592568e-05, |
| "loss": 0.0977, |
| "mean_token_accuracy": 0.9746908023953438, |
| "step": 748 |
| }, |
| { |
| "epoch": 4.381231671554252, |
| "grad_norm": 0.7521486012712132, |
| "learning_rate": 2.6296155406785578e-05, |
| "loss": 0.0979, |
| "mean_token_accuracy": 0.9705567210912704, |
| "step": 749 |
| }, |
| { |
| "epoch": 4.387096774193548, |
| "grad_norm": 0.736741257255026, |
| "learning_rate": 2.6262846005669572e-05, |
| "loss": 0.0822, |
| "mean_token_accuracy": 0.9748862311244011, |
| "step": 750 |
| }, |
| { |
| "epoch": 4.392961876832844, |
| "grad_norm": 0.7540120050721504, |
| "learning_rate": 2.6229521132144212e-05, |
| "loss": 0.0864, |
| "mean_token_accuracy": 0.9734518304467201, |
| "step": 751 |
| }, |
| { |
| "epoch": 4.39882697947214, |
| "grad_norm": 0.6830790352544612, |
| "learning_rate": 2.619618090716534e-05, |
| "loss": 0.0886, |
| "mean_token_accuracy": 0.9746551960706711, |
| "step": 752 |
| }, |
| { |
| "epoch": 4.404692082111437, |
| "grad_norm": 0.6953309994974072, |
| "learning_rate": 2.61628254517445e-05, |
| "loss": 0.0784, |
| "mean_token_accuracy": 0.9740583300590515, |
| "step": 753 |
| }, |
| { |
| "epoch": 4.410557184750733, |
| "grad_norm": 0.693352579440526, |
| "learning_rate": 2.612945488694853e-05, |
| "loss": 0.0938, |
| "mean_token_accuracy": 0.9734242856502533, |
| "step": 754 |
| }, |
| { |
| "epoch": 4.416422287390029, |
| "grad_norm": 0.8207186438354438, |
| "learning_rate": 2.6096069333899094e-05, |
| "loss": 0.0891, |
| "mean_token_accuracy": 0.9744479283690453, |
| "step": 755 |
| }, |
| { |
| "epoch": 4.422287390029325, |
| "grad_norm": 0.9881527516230675, |
| "learning_rate": 2.6062668913772275e-05, |
| "loss": 0.113, |
| "mean_token_accuracy": 0.9657848328351974, |
| "step": 756 |
| }, |
| { |
| "epoch": 4.428152492668621, |
| "grad_norm": 0.8892111964841636, |
| "learning_rate": 2.60292537477981e-05, |
| "loss": 0.097, |
| "mean_token_accuracy": 0.970199853181839, |
| "step": 757 |
| }, |
| { |
| "epoch": 4.4340175953079175, |
| "grad_norm": 0.6005235109234973, |
| "learning_rate": 2.5995823957260132e-05, |
| "loss": 0.0896, |
| "mean_token_accuracy": 0.970848336815834, |
| "step": 758 |
| }, |
| { |
| "epoch": 4.439882697947214, |
| "grad_norm": 0.971010752823258, |
| "learning_rate": 2.596237966349501e-05, |
| "loss": 0.0902, |
| "mean_token_accuracy": 0.9717428460717201, |
| "step": 759 |
| }, |
| { |
| "epoch": 4.44574780058651, |
| "grad_norm": 0.6147265684629026, |
| "learning_rate": 2.592892098789201e-05, |
| "loss": 0.0732, |
| "mean_token_accuracy": 0.9768884256482124, |
| "step": 760 |
| }, |
| { |
| "epoch": 4.451612903225806, |
| "grad_norm": 0.6909027369872971, |
| "learning_rate": 2.589544805189261e-05, |
| "loss": 0.0786, |
| "mean_token_accuracy": 0.9766824841499329, |
| "step": 761 |
| }, |
| { |
| "epoch": 4.457478005865102, |
| "grad_norm": 0.8361117235474076, |
| "learning_rate": 2.5861960976990056e-05, |
| "loss": 0.0792, |
| "mean_token_accuracy": 0.9764246940612793, |
| "step": 762 |
| }, |
| { |
| "epoch": 4.463343108504398, |
| "grad_norm": 0.8572632181135827, |
| "learning_rate": 2.5828459884728898e-05, |
| "loss": 0.0957, |
| "mean_token_accuracy": 0.9725618660449982, |
| "step": 763 |
| }, |
| { |
| "epoch": 4.469208211143695, |
| "grad_norm": 0.7140301153665474, |
| "learning_rate": 2.5794944896704572e-05, |
| "loss": 0.0818, |
| "mean_token_accuracy": 0.9770683497190475, |
| "step": 764 |
| }, |
| { |
| "epoch": 4.475073313782991, |
| "grad_norm": 0.7204009133906272, |
| "learning_rate": 2.5761416134562955e-05, |
| "loss": 0.0871, |
| "mean_token_accuracy": 0.9739867746829987, |
| "step": 765 |
| }, |
| { |
| "epoch": 4.480938416422287, |
| "grad_norm": 0.714625922360012, |
| "learning_rate": 2.5727873719999904e-05, |
| "loss": 0.0821, |
| "mean_token_accuracy": 0.9775990322232246, |
| "step": 766 |
| }, |
| { |
| "epoch": 4.486803519061583, |
| "grad_norm": 0.6987722867821043, |
| "learning_rate": 2.569431777476084e-05, |
| "loss": 0.0864, |
| "mean_token_accuracy": 0.9749646931886673, |
| "step": 767 |
| }, |
| { |
| "epoch": 4.492668621700879, |
| "grad_norm": 0.5888159677898139, |
| "learning_rate": 2.566074842064029e-05, |
| "loss": 0.0711, |
| "mean_token_accuracy": 0.9794390574097633, |
| "step": 768 |
| }, |
| { |
| "epoch": 4.4985337243401755, |
| "grad_norm": 0.5808906320753181, |
| "learning_rate": 2.562716577948145e-05, |
| "loss": 0.0768, |
| "mean_token_accuracy": 0.9758628606796265, |
| "step": 769 |
| }, |
| { |
| "epoch": 4.504398826979472, |
| "grad_norm": 0.6265757199170996, |
| "learning_rate": 2.5593569973175757e-05, |
| "loss": 0.0819, |
| "mean_token_accuracy": 0.9730802923440933, |
| "step": 770 |
| }, |
| { |
| "epoch": 4.510263929618768, |
| "grad_norm": 0.6979620561688771, |
| "learning_rate": 2.5559961123662405e-05, |
| "loss": 0.0826, |
| "mean_token_accuracy": 0.9771312400698662, |
| "step": 771 |
| }, |
| { |
| "epoch": 4.516129032258064, |
| "grad_norm": 0.7445338952150568, |
| "learning_rate": 2.5526339352927956e-05, |
| "loss": 0.0901, |
| "mean_token_accuracy": 0.9738541170954704, |
| "step": 772 |
| }, |
| { |
| "epoch": 4.52199413489736, |
| "grad_norm": 0.8162017405595724, |
| "learning_rate": 2.5492704783005847e-05, |
| "loss": 0.0855, |
| "mean_token_accuracy": 0.9749506264925003, |
| "step": 773 |
| }, |
| { |
| "epoch": 4.527859237536656, |
| "grad_norm": 0.8311724502956283, |
| "learning_rate": 2.5459057535975985e-05, |
| "loss": 0.0909, |
| "mean_token_accuracy": 0.9752181246876717, |
| "step": 774 |
| }, |
| { |
| "epoch": 4.533724340175953, |
| "grad_norm": 0.8339294840277889, |
| "learning_rate": 2.542539773396429e-05, |
| "loss": 0.0929, |
| "mean_token_accuracy": 0.9707833006978035, |
| "step": 775 |
| }, |
| { |
| "epoch": 4.539589442815249, |
| "grad_norm": 0.8200285823098186, |
| "learning_rate": 2.5391725499142253e-05, |
| "loss": 0.0995, |
| "mean_token_accuracy": 0.9717404097318649, |
| "step": 776 |
| }, |
| { |
| "epoch": 4.545454545454545, |
| "grad_norm": 0.6868234256963321, |
| "learning_rate": 2.535804095372648e-05, |
| "loss": 0.0848, |
| "mean_token_accuracy": 0.9748669788241386, |
| "step": 777 |
| }, |
| { |
| "epoch": 4.551319648093841, |
| "grad_norm": 0.7683932196268053, |
| "learning_rate": 2.5324344219978273e-05, |
| "loss": 0.0909, |
| "mean_token_accuracy": 0.9719524756073952, |
| "step": 778 |
| }, |
| { |
| "epoch": 4.557184750733137, |
| "grad_norm": 0.8618259518246234, |
| "learning_rate": 2.5290635420203162e-05, |
| "loss": 0.0941, |
| "mean_token_accuracy": 0.9723029881715775, |
| "step": 779 |
| }, |
| { |
| "epoch": 4.563049853372434, |
| "grad_norm": 0.6999101687652451, |
| "learning_rate": 2.525691467675048e-05, |
| "loss": 0.0877, |
| "mean_token_accuracy": 0.9769936203956604, |
| "step": 780 |
| }, |
| { |
| "epoch": 4.568914956011731, |
| "grad_norm": 0.6504309188678539, |
| "learning_rate": 2.5223182112012897e-05, |
| "loss": 0.085, |
| "mean_token_accuracy": 0.9745290204882622, |
| "step": 781 |
| }, |
| { |
| "epoch": 4.574780058651027, |
| "grad_norm": 0.5824157915995132, |
| "learning_rate": 2.5189437848426016e-05, |
| "loss": 0.0671, |
| "mean_token_accuracy": 0.9795333445072174, |
| "step": 782 |
| }, |
| { |
| "epoch": 4.580645161290323, |
| "grad_norm": 0.8952614421196792, |
| "learning_rate": 2.515568200846787e-05, |
| "loss": 0.1028, |
| "mean_token_accuracy": 0.9680057391524315, |
| "step": 783 |
| }, |
| { |
| "epoch": 4.586510263929619, |
| "grad_norm": 0.715413936449577, |
| "learning_rate": 2.5121914714658526e-05, |
| "loss": 0.0877, |
| "mean_token_accuracy": 0.9725442752242088, |
| "step": 784 |
| }, |
| { |
| "epoch": 4.592375366568915, |
| "grad_norm": 0.6999767791173405, |
| "learning_rate": 2.5088136089559636e-05, |
| "loss": 0.0772, |
| "mean_token_accuracy": 0.9761156216263771, |
| "step": 785 |
| }, |
| { |
| "epoch": 4.5982404692082115, |
| "grad_norm": 0.7377420435043683, |
| "learning_rate": 2.5054346255773952e-05, |
| "loss": 0.0711, |
| "mean_token_accuracy": 0.9770561680197716, |
| "step": 786 |
| }, |
| { |
| "epoch": 4.604105571847508, |
| "grad_norm": 0.8856700189046613, |
| "learning_rate": 2.502054533594493e-05, |
| "loss": 0.0823, |
| "mean_token_accuracy": 0.975522093474865, |
| "step": 787 |
| }, |
| { |
| "epoch": 4.609970674486804, |
| "grad_norm": 0.8051438952416194, |
| "learning_rate": 2.4986733452756264e-05, |
| "loss": 0.098, |
| "mean_token_accuracy": 0.974338486790657, |
| "step": 788 |
| }, |
| { |
| "epoch": 4.6158357771261, |
| "grad_norm": 0.6892859753641255, |
| "learning_rate": 2.495291072893142e-05, |
| "loss": 0.0865, |
| "mean_token_accuracy": 0.9751565381884575, |
| "step": 789 |
| }, |
| { |
| "epoch": 4.621700879765396, |
| "grad_norm": 0.731544286085826, |
| "learning_rate": 2.4919077287233237e-05, |
| "loss": 0.091, |
| "mean_token_accuracy": 0.975447840988636, |
| "step": 790 |
| }, |
| { |
| "epoch": 4.627565982404692, |
| "grad_norm": 0.7861172613577541, |
| "learning_rate": 2.4885233250463445e-05, |
| "loss": 0.0939, |
| "mean_token_accuracy": 0.9728690907359123, |
| "step": 791 |
| }, |
| { |
| "epoch": 4.633431085043989, |
| "grad_norm": 0.7402637083225126, |
| "learning_rate": 2.485137874146222e-05, |
| "loss": 0.0921, |
| "mean_token_accuracy": 0.9696981385350227, |
| "step": 792 |
| }, |
| { |
| "epoch": 4.639296187683285, |
| "grad_norm": 0.8249445529463938, |
| "learning_rate": 2.4817513883107762e-05, |
| "loss": 0.1127, |
| "mean_token_accuracy": 0.9655818939208984, |
| "step": 793 |
| }, |
| { |
| "epoch": 4.645161290322581, |
| "grad_norm": 0.7805981393705633, |
| "learning_rate": 2.4783638798315822e-05, |
| "loss": 0.0865, |
| "mean_token_accuracy": 0.9749421775341034, |
| "step": 794 |
| }, |
| { |
| "epoch": 4.651026392961877, |
| "grad_norm": 0.693516849302169, |
| "learning_rate": 2.4749753610039288e-05, |
| "loss": 0.0767, |
| "mean_token_accuracy": 0.9765638262033463, |
| "step": 795 |
| }, |
| { |
| "epoch": 4.656891495601173, |
| "grad_norm": 0.6592370960932631, |
| "learning_rate": 2.4715858441267706e-05, |
| "loss": 0.0848, |
| "mean_token_accuracy": 0.9731608182191849, |
| "step": 796 |
| }, |
| { |
| "epoch": 4.6627565982404695, |
| "grad_norm": 0.8410351105686258, |
| "learning_rate": 2.4681953415026845e-05, |
| "loss": 0.0947, |
| "mean_token_accuracy": 0.9714157283306122, |
| "step": 797 |
| }, |
| { |
| "epoch": 4.668621700879766, |
| "grad_norm": 0.6292824938428746, |
| "learning_rate": 2.464803865437826e-05, |
| "loss": 0.0805, |
| "mean_token_accuracy": 0.9759851396083832, |
| "step": 798 |
| }, |
| { |
| "epoch": 4.674486803519062, |
| "grad_norm": 0.8578069940125966, |
| "learning_rate": 2.461411428241883e-05, |
| "loss": 0.0975, |
| "mean_token_accuracy": 0.9713935777544975, |
| "step": 799 |
| }, |
| { |
| "epoch": 4.680351906158358, |
| "grad_norm": 0.6320034192833647, |
| "learning_rate": 2.4580180422280325e-05, |
| "loss": 0.0824, |
| "mean_token_accuracy": 0.9738278761506081, |
| "step": 800 |
| }, |
| { |
| "epoch": 4.686217008797654, |
| "grad_norm": 0.7988988939828763, |
| "learning_rate": 2.4546237197128955e-05, |
| "loss": 0.0945, |
| "mean_token_accuracy": 0.9745538905262947, |
| "step": 801 |
| }, |
| { |
| "epoch": 4.69208211143695, |
| "grad_norm": 0.7108652667816605, |
| "learning_rate": 2.451228473016492e-05, |
| "loss": 0.0821, |
| "mean_token_accuracy": 0.9754965752363205, |
| "step": 802 |
| }, |
| { |
| "epoch": 4.697947214076247, |
| "grad_norm": 0.7536562662597699, |
| "learning_rate": 2.447832314462196e-05, |
| "loss": 0.0919, |
| "mean_token_accuracy": 0.9743342474102974, |
| "step": 803 |
| }, |
| { |
| "epoch": 4.703812316715543, |
| "grad_norm": 0.6242244952368606, |
| "learning_rate": 2.444435256376692e-05, |
| "loss": 0.0798, |
| "mean_token_accuracy": 0.976905569434166, |
| "step": 804 |
| }, |
| { |
| "epoch": 4.709677419354839, |
| "grad_norm": 0.6397473344753726, |
| "learning_rate": 2.4410373110899278e-05, |
| "loss": 0.0712, |
| "mean_token_accuracy": 0.9791742563247681, |
| "step": 805 |
| }, |
| { |
| "epoch": 4.715542521994135, |
| "grad_norm": 0.7440219387317061, |
| "learning_rate": 2.4376384909350735e-05, |
| "loss": 0.1014, |
| "mean_token_accuracy": 0.9721665903925896, |
| "step": 806 |
| }, |
| { |
| "epoch": 4.721407624633431, |
| "grad_norm": 0.6325421602189405, |
| "learning_rate": 2.434238808248472e-05, |
| "loss": 0.0807, |
| "mean_token_accuracy": 0.9754432812333107, |
| "step": 807 |
| }, |
| { |
| "epoch": 4.7272727272727275, |
| "grad_norm": 0.6613651496769305, |
| "learning_rate": 2.4308382753696e-05, |
| "loss": 0.0844, |
| "mean_token_accuracy": 0.9745671674609184, |
| "step": 808 |
| }, |
| { |
| "epoch": 4.733137829912024, |
| "grad_norm": 0.655517420614535, |
| "learning_rate": 2.4274369046410183e-05, |
| "loss": 0.0873, |
| "mean_token_accuracy": 0.9765310138463974, |
| "step": 809 |
| }, |
| { |
| "epoch": 4.73900293255132, |
| "grad_norm": 0.6824568045638673, |
| "learning_rate": 2.4240347084083284e-05, |
| "loss": 0.0883, |
| "mean_token_accuracy": 0.9757503718137741, |
| "step": 810 |
| }, |
| { |
| "epoch": 4.744868035190616, |
| "grad_norm": 0.7344137920344779, |
| "learning_rate": 2.4206316990201288e-05, |
| "loss": 0.091, |
| "mean_token_accuracy": 0.9691428020596504, |
| "step": 811 |
| }, |
| { |
| "epoch": 4.750733137829912, |
| "grad_norm": 0.7658530881044803, |
| "learning_rate": 2.4172278888279686e-05, |
| "loss": 0.1013, |
| "mean_token_accuracy": 0.972835585474968, |
| "step": 812 |
| }, |
| { |
| "epoch": 4.756598240469208, |
| "grad_norm": 0.682396678860371, |
| "learning_rate": 2.4138232901863053e-05, |
| "loss": 0.0838, |
| "mean_token_accuracy": 0.9742336198687553, |
| "step": 813 |
| }, |
| { |
| "epoch": 4.762463343108505, |
| "grad_norm": 0.7462145098935278, |
| "learning_rate": 2.4104179154524557e-05, |
| "loss": 0.0843, |
| "mean_token_accuracy": 0.9767311811447144, |
| "step": 814 |
| }, |
| { |
| "epoch": 4.768328445747801, |
| "grad_norm": 0.647595797993278, |
| "learning_rate": 2.4070117769865554e-05, |
| "loss": 0.074, |
| "mean_token_accuracy": 0.9771558046340942, |
| "step": 815 |
| }, |
| { |
| "epoch": 4.774193548387097, |
| "grad_norm": 0.6375854586116831, |
| "learning_rate": 2.403604887151512e-05, |
| "loss": 0.0825, |
| "mean_token_accuracy": 0.9733422249555588, |
| "step": 816 |
| }, |
| { |
| "epoch": 4.780058651026393, |
| "grad_norm": 0.6430806428065273, |
| "learning_rate": 2.400197258312959e-05, |
| "loss": 0.0761, |
| "mean_token_accuracy": 0.9772883579134941, |
| "step": 817 |
| }, |
| { |
| "epoch": 4.785923753665689, |
| "grad_norm": 0.7027690903069118, |
| "learning_rate": 2.3967889028392115e-05, |
| "loss": 0.0732, |
| "mean_token_accuracy": 0.9762856140732765, |
| "step": 818 |
| }, |
| { |
| "epoch": 4.7917888563049855, |
| "grad_norm": 0.7915351422852497, |
| "learning_rate": 2.3933798331012255e-05, |
| "loss": 0.0941, |
| "mean_token_accuracy": 0.971655435860157, |
| "step": 819 |
| }, |
| { |
| "epoch": 4.797653958944282, |
| "grad_norm": 0.8230077673870994, |
| "learning_rate": 2.3899700614725458e-05, |
| "loss": 0.0973, |
| "mean_token_accuracy": 0.9686101600527763, |
| "step": 820 |
| }, |
| { |
| "epoch": 4.803519061583578, |
| "grad_norm": 0.8204373001199515, |
| "learning_rate": 2.3865596003292674e-05, |
| "loss": 0.097, |
| "mean_token_accuracy": 0.9719519168138504, |
| "step": 821 |
| }, |
| { |
| "epoch": 4.809384164222874, |
| "grad_norm": 0.7083813400624878, |
| "learning_rate": 2.3831484620499867e-05, |
| "loss": 0.0842, |
| "mean_token_accuracy": 0.976848654448986, |
| "step": 822 |
| }, |
| { |
| "epoch": 4.81524926686217, |
| "grad_norm": 0.7705104881876454, |
| "learning_rate": 2.3797366590157565e-05, |
| "loss": 0.0997, |
| "mean_token_accuracy": 0.9676300510764122, |
| "step": 823 |
| }, |
| { |
| "epoch": 4.821114369501466, |
| "grad_norm": 0.726991332245595, |
| "learning_rate": 2.3763242036100457e-05, |
| "loss": 0.0831, |
| "mean_token_accuracy": 0.974996529519558, |
| "step": 824 |
| }, |
| { |
| "epoch": 4.826979472140763, |
| "grad_norm": 0.6741623993634527, |
| "learning_rate": 2.372911108218688e-05, |
| "loss": 0.0863, |
| "mean_token_accuracy": 0.9751091077923775, |
| "step": 825 |
| }, |
| { |
| "epoch": 4.832844574780059, |
| "grad_norm": 0.899437782138063, |
| "learning_rate": 2.3694973852298425e-05, |
| "loss": 0.1008, |
| "mean_token_accuracy": 0.9714139476418495, |
| "step": 826 |
| }, |
| { |
| "epoch": 4.838709677419355, |
| "grad_norm": 0.702151665184058, |
| "learning_rate": 2.3660830470339436e-05, |
| "loss": 0.0814, |
| "mean_token_accuracy": 0.9744185507297516, |
| "step": 827 |
| }, |
| { |
| "epoch": 4.844574780058651, |
| "grad_norm": 0.6507730374741707, |
| "learning_rate": 2.362668106023661e-05, |
| "loss": 0.0829, |
| "mean_token_accuracy": 0.9753250107169151, |
| "step": 828 |
| }, |
| { |
| "epoch": 4.850439882697947, |
| "grad_norm": 0.9585116172423198, |
| "learning_rate": 2.3592525745938515e-05, |
| "loss": 0.0916, |
| "mean_token_accuracy": 0.974070705473423, |
| "step": 829 |
| }, |
| { |
| "epoch": 4.8563049853372435, |
| "grad_norm": 0.6456555295026087, |
| "learning_rate": 2.355836465141513e-05, |
| "loss": 0.0765, |
| "mean_token_accuracy": 0.9768242910504341, |
| "step": 830 |
| }, |
| { |
| "epoch": 4.86217008797654, |
| "grad_norm": 0.7629455851838094, |
| "learning_rate": 2.3524197900657447e-05, |
| "loss": 0.1012, |
| "mean_token_accuracy": 0.9716575890779495, |
| "step": 831 |
| }, |
| { |
| "epoch": 4.868035190615836, |
| "grad_norm": 0.6385751398243038, |
| "learning_rate": 2.3490025617676966e-05, |
| "loss": 0.0785, |
| "mean_token_accuracy": 0.978433296084404, |
| "step": 832 |
| }, |
| { |
| "epoch": 4.873900293255132, |
| "grad_norm": 0.7762741748913455, |
| "learning_rate": 2.3455847926505283e-05, |
| "loss": 0.0998, |
| "mean_token_accuracy": 0.9690258279442787, |
| "step": 833 |
| }, |
| { |
| "epoch": 4.879765395894428, |
| "grad_norm": 0.6712371371046408, |
| "learning_rate": 2.3421664951193596e-05, |
| "loss": 0.0875, |
| "mean_token_accuracy": 0.9740894809365273, |
| "step": 834 |
| }, |
| { |
| "epoch": 4.885630498533724, |
| "grad_norm": 0.7441124698561299, |
| "learning_rate": 2.3387476815812313e-05, |
| "loss": 0.0927, |
| "mean_token_accuracy": 0.9717249646782875, |
| "step": 835 |
| }, |
| { |
| "epoch": 4.891495601173021, |
| "grad_norm": 0.8250944839663914, |
| "learning_rate": 2.3353283644450556e-05, |
| "loss": 0.1051, |
| "mean_token_accuracy": 0.9686624780297279, |
| "step": 836 |
| }, |
| { |
| "epoch": 4.897360703812317, |
| "grad_norm": 0.7052455188209211, |
| "learning_rate": 2.3319085561215724e-05, |
| "loss": 0.0904, |
| "mean_token_accuracy": 0.9723523110151291, |
| "step": 837 |
| }, |
| { |
| "epoch": 4.903225806451613, |
| "grad_norm": 0.7239462558423183, |
| "learning_rate": 2.328488269023305e-05, |
| "loss": 0.0825, |
| "mean_token_accuracy": 0.9772523939609528, |
| "step": 838 |
| }, |
| { |
| "epoch": 4.909090909090909, |
| "grad_norm": 0.7180840126524404, |
| "learning_rate": 2.3250675155645136e-05, |
| "loss": 0.0864, |
| "mean_token_accuracy": 0.9757138639688492, |
| "step": 839 |
| }, |
| { |
| "epoch": 4.914956011730205, |
| "grad_norm": 0.6810604724605698, |
| "learning_rate": 2.3216463081611525e-05, |
| "loss": 0.0734, |
| "mean_token_accuracy": 0.9767258539795876, |
| "step": 840 |
| }, |
| { |
| "epoch": 4.9208211143695015, |
| "grad_norm": 0.9004959050046102, |
| "learning_rate": 2.3182246592308235e-05, |
| "loss": 0.1048, |
| "mean_token_accuracy": 0.9709803834557533, |
| "step": 841 |
| }, |
| { |
| "epoch": 4.926686217008798, |
| "grad_norm": 0.6903589644398147, |
| "learning_rate": 2.314802581192728e-05, |
| "loss": 0.0861, |
| "mean_token_accuracy": 0.9735254496335983, |
| "step": 842 |
| }, |
| { |
| "epoch": 4.932551319648094, |
| "grad_norm": 0.9388649809230527, |
| "learning_rate": 2.311380086467629e-05, |
| "loss": 0.1087, |
| "mean_token_accuracy": 0.9675555154681206, |
| "step": 843 |
| }, |
| { |
| "epoch": 4.93841642228739, |
| "grad_norm": 0.6728193407529811, |
| "learning_rate": 2.3079571874778e-05, |
| "loss": 0.0933, |
| "mean_token_accuracy": 0.9730138704180717, |
| "step": 844 |
| }, |
| { |
| "epoch": 4.944281524926686, |
| "grad_norm": 0.6533598028959027, |
| "learning_rate": 2.304533896646981e-05, |
| "loss": 0.0847, |
| "mean_token_accuracy": 0.9751003682613373, |
| "step": 845 |
| }, |
| { |
| "epoch": 4.9501466275659824, |
| "grad_norm": 0.6164310940206862, |
| "learning_rate": 2.3011102264003354e-05, |
| "loss": 0.0775, |
| "mean_token_accuracy": 0.9751841053366661, |
| "step": 846 |
| }, |
| { |
| "epoch": 4.956011730205279, |
| "grad_norm": 0.6639517976692416, |
| "learning_rate": 2.2976861891644045e-05, |
| "loss": 0.0836, |
| "mean_token_accuracy": 0.9767726510763168, |
| "step": 847 |
| }, |
| { |
| "epoch": 4.961876832844575, |
| "grad_norm": 0.62090714661712, |
| "learning_rate": 2.2942617973670596e-05, |
| "loss": 0.0728, |
| "mean_token_accuracy": 0.9758273363113403, |
| "step": 848 |
| }, |
| { |
| "epoch": 4.967741935483871, |
| "grad_norm": 0.8452829280392841, |
| "learning_rate": 2.2908370634374603e-05, |
| "loss": 0.1073, |
| "mean_token_accuracy": 0.9687742963433266, |
| "step": 849 |
| }, |
| { |
| "epoch": 4.973607038123167, |
| "grad_norm": 0.6759201564046626, |
| "learning_rate": 2.287411999806007e-05, |
| "loss": 0.0801, |
| "mean_token_accuracy": 0.9753365591168404, |
| "step": 850 |
| }, |
| { |
| "epoch": 4.979472140762463, |
| "grad_norm": 0.8874012153781123, |
| "learning_rate": 2.2839866189042983e-05, |
| "loss": 0.0851, |
| "mean_token_accuracy": 0.9758308529853821, |
| "step": 851 |
| }, |
| { |
| "epoch": 4.9853372434017595, |
| "grad_norm": 0.6394628435704744, |
| "learning_rate": 2.2805609331650826e-05, |
| "loss": 0.0887, |
| "mean_token_accuracy": 0.9720895141363144, |
| "step": 852 |
| }, |
| { |
| "epoch": 4.991202346041056, |
| "grad_norm": 0.6712758849886379, |
| "learning_rate": 2.2771349550222158e-05, |
| "loss": 0.0802, |
| "mean_token_accuracy": 0.9771791622042656, |
| "step": 853 |
| }, |
| { |
| "epoch": 4.997067448680352, |
| "grad_norm": 0.6076118168062584, |
| "learning_rate": 2.273708696910616e-05, |
| "loss": 0.0766, |
| "mean_token_accuracy": 0.9765864163637161, |
| "step": 854 |
| }, |
| { |
| "epoch": 5.0, |
| "grad_norm": 0.6076118168062584, |
| "learning_rate": 2.2702821712662147e-05, |
| "loss": 0.073, |
| "mean_token_accuracy": 0.9801962226629257, |
| "step": 855 |
| }, |
| { |
| "epoch": 5.005865102639296, |
| "grad_norm": 1.0026765048680202, |
| "learning_rate": 2.2668553905259168e-05, |
| "loss": 0.0693, |
| "mean_token_accuracy": 0.9787380993366241, |
| "step": 856 |
| }, |
| { |
| "epoch": 5.011730205278592, |
| "grad_norm": 0.4886645531830975, |
| "learning_rate": 2.2634283671275523e-05, |
| "loss": 0.0589, |
| "mean_token_accuracy": 0.9821438789367676, |
| "step": 857 |
| }, |
| { |
| "epoch": 5.0175953079178885, |
| "grad_norm": 0.5221300385899804, |
| "learning_rate": 2.2600011135098323e-05, |
| "loss": 0.0647, |
| "mean_token_accuracy": 0.980193242430687, |
| "step": 858 |
| }, |
| { |
| "epoch": 5.023460410557185, |
| "grad_norm": 0.506920475477819, |
| "learning_rate": 2.2565736421123035e-05, |
| "loss": 0.0759, |
| "mean_token_accuracy": 0.9781730622053146, |
| "step": 859 |
| }, |
| { |
| "epoch": 5.029325513196481, |
| "grad_norm": 0.7195873400454875, |
| "learning_rate": 2.253145965375302e-05, |
| "loss": 0.0839, |
| "mean_token_accuracy": 0.974876344203949, |
| "step": 860 |
| }, |
| { |
| "epoch": 5.035190615835777, |
| "grad_norm": 0.5533130891308242, |
| "learning_rate": 2.2497180957399108e-05, |
| "loss": 0.0788, |
| "mean_token_accuracy": 0.9747349694371223, |
| "step": 861 |
| }, |
| { |
| "epoch": 5.041055718475073, |
| "grad_norm": 0.6804425923592532, |
| "learning_rate": 2.246290045647912e-05, |
| "loss": 0.0643, |
| "mean_token_accuracy": 0.9811800122261047, |
| "step": 862 |
| }, |
| { |
| "epoch": 5.0469208211143695, |
| "grad_norm": 0.5660705321290153, |
| "learning_rate": 2.242861827541742e-05, |
| "loss": 0.0598, |
| "mean_token_accuracy": 0.9806637167930603, |
| "step": 863 |
| }, |
| { |
| "epoch": 5.052785923753666, |
| "grad_norm": 0.4533661141133845, |
| "learning_rate": 2.2394334538644494e-05, |
| "loss": 0.0683, |
| "mean_token_accuracy": 0.9784892648458481, |
| "step": 864 |
| }, |
| { |
| "epoch": 5.058651026392962, |
| "grad_norm": 0.7256930320993769, |
| "learning_rate": 2.2360049370596454e-05, |
| "loss": 0.0753, |
| "mean_token_accuracy": 0.9793033376336098, |
| "step": 865 |
| }, |
| { |
| "epoch": 5.064516129032258, |
| "grad_norm": 0.7563840557949948, |
| "learning_rate": 2.2325762895714616e-05, |
| "loss": 0.0776, |
| "mean_token_accuracy": 0.9749964252114296, |
| "step": 866 |
| }, |
| { |
| "epoch": 5.070381231671554, |
| "grad_norm": 0.6027240259420076, |
| "learning_rate": 2.2291475238445033e-05, |
| "loss": 0.0748, |
| "mean_token_accuracy": 0.977846160531044, |
| "step": 867 |
| }, |
| { |
| "epoch": 5.07624633431085, |
| "grad_norm": 0.7312019193066984, |
| "learning_rate": 2.225718652323805e-05, |
| "loss": 0.0748, |
| "mean_token_accuracy": 0.9755967482924461, |
| "step": 868 |
| }, |
| { |
| "epoch": 5.0821114369501466, |
| "grad_norm": 0.5400054143225983, |
| "learning_rate": 2.2222896874547856e-05, |
| "loss": 0.08, |
| "mean_token_accuracy": 0.9775899350643158, |
| "step": 869 |
| }, |
| { |
| "epoch": 5.087976539589443, |
| "grad_norm": 0.749736952140823, |
| "learning_rate": 2.2188606416832035e-05, |
| "loss": 0.063, |
| "mean_token_accuracy": 0.9818405732512474, |
| "step": 870 |
| }, |
| { |
| "epoch": 5.093841642228739, |
| "grad_norm": 0.7238913429587271, |
| "learning_rate": 2.2154315274551093e-05, |
| "loss": 0.0829, |
| "mean_token_accuracy": 0.9755804762244225, |
| "step": 871 |
| }, |
| { |
| "epoch": 5.099706744868035, |
| "grad_norm": 0.7005006356881465, |
| "learning_rate": 2.2120023572168026e-05, |
| "loss": 0.0678, |
| "mean_token_accuracy": 0.9808993488550186, |
| "step": 872 |
| }, |
| { |
| "epoch": 5.105571847507331, |
| "grad_norm": 0.6109157423351927, |
| "learning_rate": 2.208573143414787e-05, |
| "loss": 0.0637, |
| "mean_token_accuracy": 0.9813826605677605, |
| "step": 873 |
| }, |
| { |
| "epoch": 5.1114369501466275, |
| "grad_norm": 0.5061420246936063, |
| "learning_rate": 2.2051438984957234e-05, |
| "loss": 0.0657, |
| "mean_token_accuracy": 0.9802054837346077, |
| "step": 874 |
| }, |
| { |
| "epoch": 5.117302052785924, |
| "grad_norm": 0.553717575776044, |
| "learning_rate": 2.2017146349063855e-05, |
| "loss": 0.0792, |
| "mean_token_accuracy": 0.9763247072696686, |
| "step": 875 |
| }, |
| { |
| "epoch": 5.12316715542522, |
| "grad_norm": 0.6863136450685561, |
| "learning_rate": 2.1982853650936154e-05, |
| "loss": 0.0713, |
| "mean_token_accuracy": 0.9781040996313095, |
| "step": 876 |
| }, |
| { |
| "epoch": 5.129032258064516, |
| "grad_norm": 0.5124269447535285, |
| "learning_rate": 2.1948561015042772e-05, |
| "loss": 0.0733, |
| "mean_token_accuracy": 0.978808619081974, |
| "step": 877 |
| }, |
| { |
| "epoch": 5.134897360703812, |
| "grad_norm": 0.6562503546420304, |
| "learning_rate": 2.1914268565852134e-05, |
| "loss": 0.0768, |
| "mean_token_accuracy": 0.9771819114685059, |
| "step": 878 |
| }, |
| { |
| "epoch": 5.140762463343108, |
| "grad_norm": 0.5648878023806803, |
| "learning_rate": 2.1879976427831983e-05, |
| "loss": 0.0746, |
| "mean_token_accuracy": 0.9797395169734955, |
| "step": 879 |
| }, |
| { |
| "epoch": 5.146627565982405, |
| "grad_norm": 0.5567734178801955, |
| "learning_rate": 2.1845684725448916e-05, |
| "loss": 0.0774, |
| "mean_token_accuracy": 0.9762084037065506, |
| "step": 880 |
| }, |
| { |
| "epoch": 5.152492668621701, |
| "grad_norm": 0.604016753073881, |
| "learning_rate": 2.181139358316797e-05, |
| "loss": 0.0799, |
| "mean_token_accuracy": 0.9763386994600296, |
| "step": 881 |
| }, |
| { |
| "epoch": 5.158357771260997, |
| "grad_norm": 0.7020291349192862, |
| "learning_rate": 2.1777103125452146e-05, |
| "loss": 0.0716, |
| "mean_token_accuracy": 0.9784782081842422, |
| "step": 882 |
| }, |
| { |
| "epoch": 5.164222873900293, |
| "grad_norm": 0.7007843726478299, |
| "learning_rate": 2.1742813476761958e-05, |
| "loss": 0.0827, |
| "mean_token_accuracy": 0.9760249108076096, |
| "step": 883 |
| }, |
| { |
| "epoch": 5.170087976539589, |
| "grad_norm": 0.5702016510876317, |
| "learning_rate": 2.1708524761554973e-05, |
| "loss": 0.0754, |
| "mean_token_accuracy": 0.9762247651815414, |
| "step": 884 |
| }, |
| { |
| "epoch": 5.1759530791788855, |
| "grad_norm": 0.5730616842732398, |
| "learning_rate": 2.1674237104285393e-05, |
| "loss": 0.0668, |
| "mean_token_accuracy": 0.9794589728116989, |
| "step": 885 |
| }, |
| { |
| "epoch": 5.181818181818182, |
| "grad_norm": 0.49051489661277814, |
| "learning_rate": 2.1639950629403552e-05, |
| "loss": 0.0627, |
| "mean_token_accuracy": 0.9801739826798439, |
| "step": 886 |
| }, |
| { |
| "epoch": 5.187683284457478, |
| "grad_norm": 0.6147350528773317, |
| "learning_rate": 2.1605665461355515e-05, |
| "loss": 0.0748, |
| "mean_token_accuracy": 0.9772293791174889, |
| "step": 887 |
| }, |
| { |
| "epoch": 5.193548387096774, |
| "grad_norm": 0.574887287790943, |
| "learning_rate": 2.1571381724582588e-05, |
| "loss": 0.0737, |
| "mean_token_accuracy": 0.9775480031967163, |
| "step": 888 |
| }, |
| { |
| "epoch": 5.19941348973607, |
| "grad_norm": 0.5802402780421291, |
| "learning_rate": 2.153709954352089e-05, |
| "loss": 0.0707, |
| "mean_token_accuracy": 0.9791269749403, |
| "step": 889 |
| }, |
| { |
| "epoch": 5.205278592375366, |
| "grad_norm": 0.5892378515440839, |
| "learning_rate": 2.15028190426009e-05, |
| "loss": 0.0795, |
| "mean_token_accuracy": 0.9762731716036797, |
| "step": 890 |
| }, |
| { |
| "epoch": 5.211143695014663, |
| "grad_norm": 0.561441923874465, |
| "learning_rate": 2.1468540346246986e-05, |
| "loss": 0.0825, |
| "mean_token_accuracy": 0.9746378436684608, |
| "step": 891 |
| }, |
| { |
| "epoch": 5.217008797653959, |
| "grad_norm": 0.7693860021671447, |
| "learning_rate": 2.143426357887697e-05, |
| "loss": 0.0765, |
| "mean_token_accuracy": 0.9771846383810043, |
| "step": 892 |
| }, |
| { |
| "epoch": 5.222873900293255, |
| "grad_norm": 0.5880074332718885, |
| "learning_rate": 2.139998886490169e-05, |
| "loss": 0.0632, |
| "mean_token_accuracy": 0.9813052341341972, |
| "step": 893 |
| }, |
| { |
| "epoch": 5.228739002932551, |
| "grad_norm": 0.5589521393363575, |
| "learning_rate": 2.136571632872449e-05, |
| "loss": 0.0783, |
| "mean_token_accuracy": 0.9764862582087517, |
| "step": 894 |
| }, |
| { |
| "epoch": 5.234604105571847, |
| "grad_norm": 0.6958118598247672, |
| "learning_rate": 2.1331446094740845e-05, |
| "loss": 0.0813, |
| "mean_token_accuracy": 0.9756343215703964, |
| "step": 895 |
| }, |
| { |
| "epoch": 5.2404692082111435, |
| "grad_norm": 0.648605914341562, |
| "learning_rate": 2.1297178287337865e-05, |
| "loss": 0.0736, |
| "mean_token_accuracy": 0.9772472456097603, |
| "step": 896 |
| }, |
| { |
| "epoch": 5.24633431085044, |
| "grad_norm": 0.6859493516947144, |
| "learning_rate": 2.1262913030893855e-05, |
| "loss": 0.0773, |
| "mean_token_accuracy": 0.9786031097173691, |
| "step": 897 |
| }, |
| { |
| "epoch": 5.252199413489736, |
| "grad_norm": 0.6803915454335052, |
| "learning_rate": 2.1228650449777848e-05, |
| "loss": 0.0798, |
| "mean_token_accuracy": 0.9753960222005844, |
| "step": 898 |
| }, |
| { |
| "epoch": 5.258064516129032, |
| "grad_norm": 0.5949700617476797, |
| "learning_rate": 2.1194390668349186e-05, |
| "loss": 0.0771, |
| "mean_token_accuracy": 0.9780300334095955, |
| "step": 899 |
| }, |
| { |
| "epoch": 5.263929618768328, |
| "grad_norm": 0.7410245541650864, |
| "learning_rate": 2.116013381095703e-05, |
| "loss": 0.0697, |
| "mean_token_accuracy": 0.9797634407877922, |
| "step": 900 |
| }, |
| { |
| "epoch": 5.269794721407624, |
| "grad_norm": 0.4455821502764853, |
| "learning_rate": 2.112588000193994e-05, |
| "loss": 0.0712, |
| "mean_token_accuracy": 0.9782530590891838, |
| "step": 901 |
| }, |
| { |
| "epoch": 5.275659824046921, |
| "grad_norm": 0.6951534941196735, |
| "learning_rate": 2.1091629365625403e-05, |
| "loss": 0.067, |
| "mean_token_accuracy": 0.979590117931366, |
| "step": 902 |
| }, |
| { |
| "epoch": 5.281524926686217, |
| "grad_norm": 0.5693519513539006, |
| "learning_rate": 2.105738202632941e-05, |
| "loss": 0.0795, |
| "mean_token_accuracy": 0.97862908244133, |
| "step": 903 |
| }, |
| { |
| "epoch": 5.287390029325513, |
| "grad_norm": 0.6074837432744669, |
| "learning_rate": 2.1023138108355957e-05, |
| "loss": 0.0611, |
| "mean_token_accuracy": 0.9821363985538483, |
| "step": 904 |
| }, |
| { |
| "epoch": 5.293255131964809, |
| "grad_norm": 0.49778279285216703, |
| "learning_rate": 2.098889773599665e-05, |
| "loss": 0.0843, |
| "mean_token_accuracy": 0.9768336862325668, |
| "step": 905 |
| }, |
| { |
| "epoch": 5.299120234604105, |
| "grad_norm": 0.7254806511844373, |
| "learning_rate": 2.0954661033530193e-05, |
| "loss": 0.0663, |
| "mean_token_accuracy": 0.9805325642228127, |
| "step": 906 |
| }, |
| { |
| "epoch": 5.3049853372434015, |
| "grad_norm": 0.4628049857510997, |
| "learning_rate": 2.0920428125222004e-05, |
| "loss": 0.0717, |
| "mean_token_accuracy": 0.9796890690922737, |
| "step": 907 |
| }, |
| { |
| "epoch": 5.310850439882698, |
| "grad_norm": 0.48912434388430215, |
| "learning_rate": 2.0886199135323712e-05, |
| "loss": 0.0773, |
| "mean_token_accuracy": 0.9784531816840172, |
| "step": 908 |
| }, |
| { |
| "epoch": 5.316715542521994, |
| "grad_norm": 0.6523411765325368, |
| "learning_rate": 2.085197418807272e-05, |
| "loss": 0.0677, |
| "mean_token_accuracy": 0.9798686727881432, |
| "step": 909 |
| }, |
| { |
| "epoch": 5.32258064516129, |
| "grad_norm": 0.6280379928431413, |
| "learning_rate": 2.0817753407691774e-05, |
| "loss": 0.0765, |
| "mean_token_accuracy": 0.9757416620850563, |
| "step": 910 |
| }, |
| { |
| "epoch": 5.328445747800586, |
| "grad_norm": 0.6506838547506919, |
| "learning_rate": 2.0783536918388477e-05, |
| "loss": 0.0823, |
| "mean_token_accuracy": 0.9750187322497368, |
| "step": 911 |
| }, |
| { |
| "epoch": 5.334310850439882, |
| "grad_norm": 0.5740998141514729, |
| "learning_rate": 2.0749324844354867e-05, |
| "loss": 0.0721, |
| "mean_token_accuracy": 0.9790320321917534, |
| "step": 912 |
| }, |
| { |
| "epoch": 5.340175953079179, |
| "grad_norm": 0.6100250700673111, |
| "learning_rate": 2.0715117309766953e-05, |
| "loss": 0.0801, |
| "mean_token_accuracy": 0.9779443517327309, |
| "step": 913 |
| }, |
| { |
| "epoch": 5.346041055718475, |
| "grad_norm": 0.6254016260556631, |
| "learning_rate": 2.068091443878428e-05, |
| "loss": 0.0806, |
| "mean_token_accuracy": 0.9745582416653633, |
| "step": 914 |
| }, |
| { |
| "epoch": 5.351906158357771, |
| "grad_norm": 0.6948395876064227, |
| "learning_rate": 2.064671635554945e-05, |
| "loss": 0.0972, |
| "mean_token_accuracy": 0.9737641960382462, |
| "step": 915 |
| }, |
| { |
| "epoch": 5.357771260997067, |
| "grad_norm": 0.7081713644165885, |
| "learning_rate": 2.0612523184187693e-05, |
| "loss": 0.066, |
| "mean_token_accuracy": 0.9779355525970459, |
| "step": 916 |
| }, |
| { |
| "epoch": 5.363636363636363, |
| "grad_norm": 0.5471530801482298, |
| "learning_rate": 2.057833504880641e-05, |
| "loss": 0.0723, |
| "mean_token_accuracy": 0.9762101992964745, |
| "step": 917 |
| }, |
| { |
| "epoch": 5.3695014662756595, |
| "grad_norm": 0.5378564258248137, |
| "learning_rate": 2.054415207349473e-05, |
| "loss": 0.0774, |
| "mean_token_accuracy": 0.976969949901104, |
| "step": 918 |
| }, |
| { |
| "epoch": 5.375366568914956, |
| "grad_norm": 0.5600324469227966, |
| "learning_rate": 2.0509974382323043e-05, |
| "loss": 0.0712, |
| "mean_token_accuracy": 0.9783627018332481, |
| "step": 919 |
| }, |
| { |
| "epoch": 5.381231671554252, |
| "grad_norm": 0.5079271037640322, |
| "learning_rate": 2.047580209934256e-05, |
| "loss": 0.078, |
| "mean_token_accuracy": 0.9777331501245499, |
| "step": 920 |
| }, |
| { |
| "epoch": 5.387096774193548, |
| "grad_norm": 0.6585224453075607, |
| "learning_rate": 2.0441635348584876e-05, |
| "loss": 0.0773, |
| "mean_token_accuracy": 0.9772825464606285, |
| "step": 921 |
| }, |
| { |
| "epoch": 5.392961876832844, |
| "grad_norm": 0.6247906925375318, |
| "learning_rate": 2.0407474254061498e-05, |
| "loss": 0.0831, |
| "mean_token_accuracy": 0.9741897881031036, |
| "step": 922 |
| }, |
| { |
| "epoch": 5.39882697947214, |
| "grad_norm": 0.5472561096522515, |
| "learning_rate": 2.0373318939763397e-05, |
| "loss": 0.0785, |
| "mean_token_accuracy": 0.976148895919323, |
| "step": 923 |
| }, |
| { |
| "epoch": 5.404692082111437, |
| "grad_norm": 0.6294772303187749, |
| "learning_rate": 2.033916952966057e-05, |
| "loss": 0.0683, |
| "mean_token_accuracy": 0.9790761917829514, |
| "step": 924 |
| }, |
| { |
| "epoch": 5.410557184750733, |
| "grad_norm": 0.47662725471278244, |
| "learning_rate": 2.0305026147701584e-05, |
| "loss": 0.0747, |
| "mean_token_accuracy": 0.9755368903279305, |
| "step": 925 |
| }, |
| { |
| "epoch": 5.416422287390029, |
| "grad_norm": 0.6610622738741952, |
| "learning_rate": 2.0270888917813124e-05, |
| "loss": 0.0671, |
| "mean_token_accuracy": 0.9788747951388359, |
| "step": 926 |
| }, |
| { |
| "epoch": 5.422287390029325, |
| "grad_norm": 0.5589675836077449, |
| "learning_rate": 2.0236757963899548e-05, |
| "loss": 0.0738, |
| "mean_token_accuracy": 0.9767239764332771, |
| "step": 927 |
| }, |
| { |
| "epoch": 5.428152492668621, |
| "grad_norm": 0.6019998421138993, |
| "learning_rate": 2.020263340984244e-05, |
| "loss": 0.0742, |
| "mean_token_accuracy": 0.9795248135924339, |
| "step": 928 |
| }, |
| { |
| "epoch": 5.4340175953079175, |
| "grad_norm": 0.5166261136130894, |
| "learning_rate": 2.0168515379500145e-05, |
| "loss": 0.0701, |
| "mean_token_accuracy": 0.9759251549839973, |
| "step": 929 |
| }, |
| { |
| "epoch": 5.439882697947214, |
| "grad_norm": 0.6006084519966477, |
| "learning_rate": 2.0134403996707338e-05, |
| "loss": 0.065, |
| "mean_token_accuracy": 0.9794113636016846, |
| "step": 930 |
| }, |
| { |
| "epoch": 5.44574780058651, |
| "grad_norm": 0.45582821622973035, |
| "learning_rate": 2.0100299385274547e-05, |
| "loss": 0.0713, |
| "mean_token_accuracy": 0.9784344360232353, |
| "step": 931 |
| }, |
| { |
| "epoch": 5.451612903225806, |
| "grad_norm": 0.652447127156715, |
| "learning_rate": 2.0066201668987757e-05, |
| "loss": 0.0875, |
| "mean_token_accuracy": 0.9729999005794525, |
| "step": 932 |
| }, |
| { |
| "epoch": 5.457478005865102, |
| "grad_norm": 0.5812504477926977, |
| "learning_rate": 2.0032110971607894e-05, |
| "loss": 0.0688, |
| "mean_token_accuracy": 0.980715274810791, |
| "step": 933 |
| }, |
| { |
| "epoch": 5.463343108504398, |
| "grad_norm": 0.5882443082261385, |
| "learning_rate": 1.999802741687042e-05, |
| "loss": 0.0768, |
| "mean_token_accuracy": 0.9780777394771576, |
| "step": 934 |
| }, |
| { |
| "epoch": 5.469208211143695, |
| "grad_norm": 0.48843000233363765, |
| "learning_rate": 1.9963951128484886e-05, |
| "loss": 0.0613, |
| "mean_token_accuracy": 0.980921059846878, |
| "step": 935 |
| }, |
| { |
| "epoch": 5.475073313782991, |
| "grad_norm": 0.578207102400483, |
| "learning_rate": 1.9929882230134452e-05, |
| "loss": 0.0716, |
| "mean_token_accuracy": 0.9766323640942574, |
| "step": 936 |
| }, |
| { |
| "epoch": 5.480938416422287, |
| "grad_norm": 0.5585603420730529, |
| "learning_rate": 1.9895820845475445e-05, |
| "loss": 0.074, |
| "mean_token_accuracy": 0.9760611280798912, |
| "step": 937 |
| }, |
| { |
| "epoch": 5.486803519061583, |
| "grad_norm": 0.5630265678683493, |
| "learning_rate": 1.9861767098136956e-05, |
| "loss": 0.0668, |
| "mean_token_accuracy": 0.9797999039292336, |
| "step": 938 |
| }, |
| { |
| "epoch": 5.492668621700879, |
| "grad_norm": 0.4600091835994956, |
| "learning_rate": 1.982772111172032e-05, |
| "loss": 0.0723, |
| "mean_token_accuracy": 0.979090228676796, |
| "step": 939 |
| }, |
| { |
| "epoch": 5.4985337243401755, |
| "grad_norm": 0.6002054250965584, |
| "learning_rate": 1.9793683009798718e-05, |
| "loss": 0.0679, |
| "mean_token_accuracy": 0.9809942319989204, |
| "step": 940 |
| }, |
| { |
| "epoch": 5.504398826979472, |
| "grad_norm": 0.6319575155464214, |
| "learning_rate": 1.975965291591672e-05, |
| "loss": 0.0921, |
| "mean_token_accuracy": 0.9735964983701706, |
| "step": 941 |
| }, |
| { |
| "epoch": 5.510263929618768, |
| "grad_norm": 0.7814944840670485, |
| "learning_rate": 1.9725630953589823e-05, |
| "loss": 0.0766, |
| "mean_token_accuracy": 0.9783463180065155, |
| "step": 942 |
| }, |
| { |
| "epoch": 5.516129032258064, |
| "grad_norm": 0.4898867119107616, |
| "learning_rate": 1.9691617246304007e-05, |
| "loss": 0.0705, |
| "mean_token_accuracy": 0.9760597050189972, |
| "step": 943 |
| }, |
| { |
| "epoch": 5.52199413489736, |
| "grad_norm": 0.6544194466244087, |
| "learning_rate": 1.9657611917515287e-05, |
| "loss": 0.0774, |
| "mean_token_accuracy": 0.9778344482183456, |
| "step": 944 |
| }, |
| { |
| "epoch": 5.527859237536656, |
| "grad_norm": 0.5424147306490898, |
| "learning_rate": 1.962361509064928e-05, |
| "loss": 0.0651, |
| "mean_token_accuracy": 0.9800689145922661, |
| "step": 945 |
| }, |
| { |
| "epoch": 5.533724340175953, |
| "grad_norm": 0.42614971645103156, |
| "learning_rate": 1.958962688910073e-05, |
| "loss": 0.0621, |
| "mean_token_accuracy": 0.980566717684269, |
| "step": 946 |
| }, |
| { |
| "epoch": 5.539589442815249, |
| "grad_norm": 0.4885883014556131, |
| "learning_rate": 1.9555647436233093e-05, |
| "loss": 0.071, |
| "mean_token_accuracy": 0.9800935760140419, |
| "step": 947 |
| }, |
| { |
| "epoch": 5.545454545454545, |
| "grad_norm": 0.5623124965585408, |
| "learning_rate": 1.9521676855378045e-05, |
| "loss": 0.0704, |
| "mean_token_accuracy": 0.9787362143397331, |
| "step": 948 |
| }, |
| { |
| "epoch": 5.551319648093841, |
| "grad_norm": 0.5876377449692796, |
| "learning_rate": 1.9487715269835082e-05, |
| "loss": 0.0651, |
| "mean_token_accuracy": 0.9789851978421211, |
| "step": 949 |
| }, |
| { |
| "epoch": 5.557184750733137, |
| "grad_norm": 0.5720532607343585, |
| "learning_rate": 1.945376280287105e-05, |
| "loss": 0.0793, |
| "mean_token_accuracy": 0.9742227792739868, |
| "step": 950 |
| }, |
| { |
| "epoch": 5.563049853372434, |
| "grad_norm": 0.6241120555707173, |
| "learning_rate": 1.9419819577719684e-05, |
| "loss": 0.0703, |
| "mean_token_accuracy": 0.979230061173439, |
| "step": 951 |
| }, |
| { |
| "epoch": 5.568914956011731, |
| "grad_norm": 0.6436812121385808, |
| "learning_rate": 1.9385885717581182e-05, |
| "loss": 0.0865, |
| "mean_token_accuracy": 0.973546139895916, |
| "step": 952 |
| }, |
| { |
| "epoch": 5.574780058651027, |
| "grad_norm": 0.5147412146073209, |
| "learning_rate": 1.935196134562175e-05, |
| "loss": 0.0669, |
| "mean_token_accuracy": 0.9807008281350136, |
| "step": 953 |
| }, |
| { |
| "epoch": 5.580645161290323, |
| "grad_norm": 0.5922549441597481, |
| "learning_rate": 1.931804658497316e-05, |
| "loss": 0.0686, |
| "mean_token_accuracy": 0.9795825853943825, |
| "step": 954 |
| }, |
| { |
| "epoch": 5.586510263929619, |
| "grad_norm": 0.6564154522631755, |
| "learning_rate": 1.9284141558732296e-05, |
| "loss": 0.0764, |
| "mean_token_accuracy": 0.9765199050307274, |
| "step": 955 |
| }, |
| { |
| "epoch": 5.592375366568915, |
| "grad_norm": 0.6294040843192674, |
| "learning_rate": 1.925024638996071e-05, |
| "loss": 0.0696, |
| "mean_token_accuracy": 0.9779465198516846, |
| "step": 956 |
| }, |
| { |
| "epoch": 5.5982404692082115, |
| "grad_norm": 0.46897658325826863, |
| "learning_rate": 1.9216361201684174e-05, |
| "loss": 0.0753, |
| "mean_token_accuracy": 0.9799123182892799, |
| "step": 957 |
| }, |
| { |
| "epoch": 5.604105571847508, |
| "grad_norm": 0.6226020057368947, |
| "learning_rate": 1.918248611689224e-05, |
| "loss": 0.0669, |
| "mean_token_accuracy": 0.9796951934695244, |
| "step": 958 |
| }, |
| { |
| "epoch": 5.609970674486804, |
| "grad_norm": 0.5494371989177863, |
| "learning_rate": 1.9148621258537782e-05, |
| "loss": 0.0755, |
| "mean_token_accuracy": 0.9753241837024689, |
| "step": 959 |
| }, |
| { |
| "epoch": 5.6158357771261, |
| "grad_norm": 0.6032363669586224, |
| "learning_rate": 1.911476674953656e-05, |
| "loss": 0.0579, |
| "mean_token_accuracy": 0.980271153151989, |
| "step": 960 |
| }, |
| { |
| "epoch": 5.621700879765396, |
| "grad_norm": 0.4847213278354494, |
| "learning_rate": 1.9080922712766762e-05, |
| "loss": 0.0723, |
| "mean_token_accuracy": 0.9749229624867439, |
| "step": 961 |
| }, |
| { |
| "epoch": 5.627565982404692, |
| "grad_norm": 0.491493201519099, |
| "learning_rate": 1.904708927106858e-05, |
| "loss": 0.0792, |
| "mean_token_accuracy": 0.9770414680242538, |
| "step": 962 |
| }, |
| { |
| "epoch": 5.633431085043989, |
| "grad_norm": 0.8035666675533314, |
| "learning_rate": 1.9013266547243742e-05, |
| "loss": 0.0693, |
| "mean_token_accuracy": 0.9798395037651062, |
| "step": 963 |
| }, |
| { |
| "epoch": 5.639296187683285, |
| "grad_norm": 0.5056374819958188, |
| "learning_rate": 1.8979454664055068e-05, |
| "loss": 0.0748, |
| "mean_token_accuracy": 0.9769620299339294, |
| "step": 964 |
| }, |
| { |
| "epoch": 5.645161290322581, |
| "grad_norm": 0.6874918618082315, |
| "learning_rate": 1.894565374422605e-05, |
| "loss": 0.0675, |
| "mean_token_accuracy": 0.9785389676690102, |
| "step": 965 |
| }, |
| { |
| "epoch": 5.651026392961877, |
| "grad_norm": 0.4157605250823316, |
| "learning_rate": 1.891186391044037e-05, |
| "loss": 0.0731, |
| "mean_token_accuracy": 0.975774921476841, |
| "step": 966 |
| }, |
| { |
| "epoch": 5.656891495601173, |
| "grad_norm": 0.5823531742180683, |
| "learning_rate": 1.887808528534148e-05, |
| "loss": 0.0955, |
| "mean_token_accuracy": 0.9756604135036469, |
| "step": 967 |
| }, |
| { |
| "epoch": 5.6627565982404695, |
| "grad_norm": 1.5182787676052627, |
| "learning_rate": 1.884431799153214e-05, |
| "loss": 0.0642, |
| "mean_token_accuracy": 0.9804680868983269, |
| "step": 968 |
| }, |
| { |
| "epoch": 5.668621700879766, |
| "grad_norm": 0.5599066990953598, |
| "learning_rate": 1.8810562151573993e-05, |
| "loss": 0.0745, |
| "mean_token_accuracy": 0.9779726639389992, |
| "step": 969 |
| }, |
| { |
| "epoch": 5.674486803519062, |
| "grad_norm": 0.6070035232067619, |
| "learning_rate": 1.8776817887987105e-05, |
| "loss": 0.0758, |
| "mean_token_accuracy": 0.9777623414993286, |
| "step": 970 |
| }, |
| { |
| "epoch": 5.680351906158358, |
| "grad_norm": 0.5298345268067092, |
| "learning_rate": 1.8743085323249527e-05, |
| "loss": 0.0743, |
| "mean_token_accuracy": 0.9766695126891136, |
| "step": 971 |
| }, |
| { |
| "epoch": 5.686217008797654, |
| "grad_norm": 0.5296462474949736, |
| "learning_rate": 1.870936457979684e-05, |
| "loss": 0.074, |
| "mean_token_accuracy": 0.9775098264217377, |
| "step": 972 |
| }, |
| { |
| "epoch": 5.69208211143695, |
| "grad_norm": 0.5596310685328338, |
| "learning_rate": 1.8675655780021733e-05, |
| "loss": 0.0634, |
| "mean_token_accuracy": 0.9791462272405624, |
| "step": 973 |
| }, |
| { |
| "epoch": 5.697947214076247, |
| "grad_norm": 0.6286572843009949, |
| "learning_rate": 1.8641959046273525e-05, |
| "loss": 0.0784, |
| "mean_token_accuracy": 0.9750925973057747, |
| "step": 974 |
| }, |
| { |
| "epoch": 5.703812316715543, |
| "grad_norm": 0.5273708760096594, |
| "learning_rate": 1.8608274500857756e-05, |
| "loss": 0.0766, |
| "mean_token_accuracy": 0.9776112586259842, |
| "step": 975 |
| }, |
| { |
| "epoch": 5.709677419354839, |
| "grad_norm": 0.5577384204329816, |
| "learning_rate": 1.8574602266035714e-05, |
| "loss": 0.0608, |
| "mean_token_accuracy": 0.9810106307268143, |
| "step": 976 |
| }, |
| { |
| "epoch": 5.715542521994135, |
| "grad_norm": 0.5544850178756708, |
| "learning_rate": 1.854094246402402e-05, |
| "loss": 0.0821, |
| "mean_token_accuracy": 0.9750274196267128, |
| "step": 977 |
| }, |
| { |
| "epoch": 5.721407624633431, |
| "grad_norm": 0.6178456682643919, |
| "learning_rate": 1.8507295216994162e-05, |
| "loss": 0.0628, |
| "mean_token_accuracy": 0.9818970337510109, |
| "step": 978 |
| }, |
| { |
| "epoch": 5.7272727272727275, |
| "grad_norm": 0.5142270627968069, |
| "learning_rate": 1.8473660647072053e-05, |
| "loss": 0.0751, |
| "mean_token_accuracy": 0.9759154841303825, |
| "step": 979 |
| }, |
| { |
| "epoch": 5.733137829912024, |
| "grad_norm": 0.5021352568135602, |
| "learning_rate": 1.8440038876337597e-05, |
| "loss": 0.0677, |
| "mean_token_accuracy": 0.9790510535240173, |
| "step": 980 |
| }, |
| { |
| "epoch": 5.73900293255132, |
| "grad_norm": 0.5400003674319198, |
| "learning_rate": 1.8406430026824252e-05, |
| "loss": 0.0727, |
| "mean_token_accuracy": 0.976162277162075, |
| "step": 981 |
| }, |
| { |
| "epoch": 5.744868035190616, |
| "grad_norm": 0.6653802174420181, |
| "learning_rate": 1.837283422051855e-05, |
| "loss": 0.0708, |
| "mean_token_accuracy": 0.9791212901473045, |
| "step": 982 |
| }, |
| { |
| "epoch": 5.750733137829912, |
| "grad_norm": 0.5336134949253077, |
| "learning_rate": 1.8339251579359713e-05, |
| "loss": 0.0736, |
| "mean_token_accuracy": 0.9781318008899689, |
| "step": 983 |
| }, |
| { |
| "epoch": 5.756598240469208, |
| "grad_norm": 0.46625413729314213, |
| "learning_rate": 1.8305682225239167e-05, |
| "loss": 0.0691, |
| "mean_token_accuracy": 0.9801111742854118, |
| "step": 984 |
| }, |
| { |
| "epoch": 5.762463343108505, |
| "grad_norm": 0.7918478373807947, |
| "learning_rate": 1.8272126280000102e-05, |
| "loss": 0.0939, |
| "mean_token_accuracy": 0.9726522043347359, |
| "step": 985 |
| }, |
| { |
| "epoch": 5.768328445747801, |
| "grad_norm": 0.6659373208659011, |
| "learning_rate": 1.823858386543705e-05, |
| "loss": 0.0705, |
| "mean_token_accuracy": 0.9795112237334251, |
| "step": 986 |
| }, |
| { |
| "epoch": 5.774193548387097, |
| "grad_norm": 0.6937079334829163, |
| "learning_rate": 1.8205055103295434e-05, |
| "loss": 0.0821, |
| "mean_token_accuracy": 0.9742537960410118, |
| "step": 987 |
| }, |
| { |
| "epoch": 5.780058651026393, |
| "grad_norm": 0.5502882133997387, |
| "learning_rate": 1.8171540115271108e-05, |
| "loss": 0.074, |
| "mean_token_accuracy": 0.9749346300959587, |
| "step": 988 |
| }, |
| { |
| "epoch": 5.785923753665689, |
| "grad_norm": 0.5723847973030097, |
| "learning_rate": 1.813803902300995e-05, |
| "loss": 0.0767, |
| "mean_token_accuracy": 0.9775624573230743, |
| "step": 989 |
| }, |
| { |
| "epoch": 5.7917888563049855, |
| "grad_norm": 0.48559336446340945, |
| "learning_rate": 1.8104551948107395e-05, |
| "loss": 0.0689, |
| "mean_token_accuracy": 0.9818530306220055, |
| "step": 990 |
| }, |
| { |
| "epoch": 5.797653958944282, |
| "grad_norm": 0.43809664758169425, |
| "learning_rate": 1.8071079012107997e-05, |
| "loss": 0.0641, |
| "mean_token_accuracy": 0.9799798876047134, |
| "step": 991 |
| }, |
| { |
| "epoch": 5.803519061583578, |
| "grad_norm": 0.5446369284916783, |
| "learning_rate": 1.8037620336504993e-05, |
| "loss": 0.07, |
| "mean_token_accuracy": 0.9789104983210564, |
| "step": 992 |
| }, |
| { |
| "epoch": 5.809384164222874, |
| "grad_norm": 0.4733802615698864, |
| "learning_rate": 1.8004176042739877e-05, |
| "loss": 0.0732, |
| "mean_token_accuracy": 0.9795755222439766, |
| "step": 993 |
| }, |
| { |
| "epoch": 5.81524926686217, |
| "grad_norm": 0.6076123349283594, |
| "learning_rate": 1.797074625220191e-05, |
| "loss": 0.0715, |
| "mean_token_accuracy": 0.9788392633199692, |
| "step": 994 |
| }, |
| { |
| "epoch": 5.821114369501466, |
| "grad_norm": 0.6527755849095698, |
| "learning_rate": 1.7937331086227737e-05, |
| "loss": 0.0828, |
| "mean_token_accuracy": 0.9729266539216042, |
| "step": 995 |
| }, |
| { |
| "epoch": 5.826979472140763, |
| "grad_norm": 0.6987750283606413, |
| "learning_rate": 1.790393066610091e-05, |
| "loss": 0.0826, |
| "mean_token_accuracy": 0.9741199016571045, |
| "step": 996 |
| }, |
| { |
| "epoch": 5.832844574780059, |
| "grad_norm": 0.673623466884957, |
| "learning_rate": 1.787054511305148e-05, |
| "loss": 0.0852, |
| "mean_token_accuracy": 0.9762526527047157, |
| "step": 997 |
| }, |
| { |
| "epoch": 5.838709677419355, |
| "grad_norm": 0.6730592209203903, |
| "learning_rate": 1.7837174548255504e-05, |
| "loss": 0.075, |
| "mean_token_accuracy": 0.9776707738637924, |
| "step": 998 |
| }, |
| { |
| "epoch": 5.844574780058651, |
| "grad_norm": 0.45848872494150783, |
| "learning_rate": 1.7803819092834668e-05, |
| "loss": 0.0687, |
| "mean_token_accuracy": 0.9793645292520523, |
| "step": 999 |
| }, |
| { |
| "epoch": 5.850439882697947, |
| "grad_norm": 0.7023448385378745, |
| "learning_rate": 1.7770478867855797e-05, |
| "loss": 0.0761, |
| "mean_token_accuracy": 0.9792755618691444, |
| "step": 1000 |
| }, |
| { |
| "epoch": 5.8563049853372435, |
| "grad_norm": 0.5175131551283948, |
| "learning_rate": 1.7737153994330437e-05, |
| "loss": 0.0865, |
| "mean_token_accuracy": 0.9750565141439438, |
| "step": 1001 |
| }, |
| { |
| "epoch": 5.86217008797654, |
| "grad_norm": 0.6262907861375655, |
| "learning_rate": 1.7703844593214427e-05, |
| "loss": 0.0628, |
| "mean_token_accuracy": 0.9800690039992332, |
| "step": 1002 |
| }, |
| { |
| "epoch": 5.868035190615836, |
| "grad_norm": 0.44221893487398767, |
| "learning_rate": 1.7670550785407444e-05, |
| "loss": 0.0572, |
| "mean_token_accuracy": 0.9825234487652779, |
| "step": 1003 |
| }, |
| { |
| "epoch": 5.873900293255132, |
| "grad_norm": 0.4987557150663674, |
| "learning_rate": 1.7637272691752548e-05, |
| "loss": 0.0771, |
| "mean_token_accuracy": 0.975949339568615, |
| "step": 1004 |
| }, |
| { |
| "epoch": 5.879765395894428, |
| "grad_norm": 0.5103138713112173, |
| "learning_rate": 1.7604010433035793e-05, |
| "loss": 0.0816, |
| "mean_token_accuracy": 0.9757112711668015, |
| "step": 1005 |
| }, |
| { |
| "epoch": 5.885630498533724, |
| "grad_norm": 0.5757247782780586, |
| "learning_rate": 1.7570764129985747e-05, |
| "loss": 0.0714, |
| "mean_token_accuracy": 0.9774347543716431, |
| "step": 1006 |
| }, |
| { |
| "epoch": 5.891495601173021, |
| "grad_norm": 0.5735144828579501, |
| "learning_rate": 1.7537533903273055e-05, |
| "loss": 0.0672, |
| "mean_token_accuracy": 0.9788115695118904, |
| "step": 1007 |
| }, |
| { |
| "epoch": 5.897360703812317, |
| "grad_norm": 0.5276596129193636, |
| "learning_rate": 1.7504319873510014e-05, |
| "loss": 0.0816, |
| "mean_token_accuracy": 0.9764761105179787, |
| "step": 1008 |
| }, |
| { |
| "epoch": 5.903225806451613, |
| "grad_norm": 0.5934904035247996, |
| "learning_rate": 1.7471122161250153e-05, |
| "loss": 0.0829, |
| "mean_token_accuracy": 0.9750788882374763, |
| "step": 1009 |
| }, |
| { |
| "epoch": 5.909090909090909, |
| "grad_norm": 0.7581092342018619, |
| "learning_rate": 1.743794088698775e-05, |
| "loss": 0.0792, |
| "mean_token_accuracy": 0.9779830947518349, |
| "step": 1010 |
| }, |
| { |
| "epoch": 5.914956011730205, |
| "grad_norm": 0.46297684327502575, |
| "learning_rate": 1.7404776171157428e-05, |
| "loss": 0.0764, |
| "mean_token_accuracy": 0.9766190350055695, |
| "step": 1011 |
| }, |
| { |
| "epoch": 5.9208211143695015, |
| "grad_norm": 0.5333324483571935, |
| "learning_rate": 1.7371628134133716e-05, |
| "loss": 0.0875, |
| "mean_token_accuracy": 0.9737675860524178, |
| "step": 1012 |
| }, |
| { |
| "epoch": 5.926686217008798, |
| "grad_norm": 0.6875722103361684, |
| "learning_rate": 1.73384968962306e-05, |
| "loss": 0.0731, |
| "mean_token_accuracy": 0.9765476137399673, |
| "step": 1013 |
| }, |
| { |
| "epoch": 5.932551319648094, |
| "grad_norm": 0.5498994219533493, |
| "learning_rate": 1.7305382577701088e-05, |
| "loss": 0.0786, |
| "mean_token_accuracy": 0.9759142473340034, |
| "step": 1014 |
| }, |
| { |
| "epoch": 5.93841642228739, |
| "grad_norm": 0.5849125695448263, |
| "learning_rate": 1.7272285298736787e-05, |
| "loss": 0.069, |
| "mean_token_accuracy": 0.9774723574519157, |
| "step": 1015 |
| }, |
| { |
| "epoch": 5.944281524926686, |
| "grad_norm": 0.5757882291325982, |
| "learning_rate": 1.7239205179467453e-05, |
| "loss": 0.0811, |
| "mean_token_accuracy": 0.9770863503217697, |
| "step": 1016 |
| }, |
| { |
| "epoch": 5.9501466275659824, |
| "grad_norm": 0.6063631339216475, |
| "learning_rate": 1.720614233996056e-05, |
| "loss": 0.0939, |
| "mean_token_accuracy": 0.9728550314903259, |
| "step": 1017 |
| }, |
| { |
| "epoch": 5.956011730205279, |
| "grad_norm": 0.7121493997252688, |
| "learning_rate": 1.7173096900220852e-05, |
| "loss": 0.0716, |
| "mean_token_accuracy": 0.9775163680315018, |
| "step": 1018 |
| }, |
| { |
| "epoch": 5.961876832844575, |
| "grad_norm": 0.5713158877121456, |
| "learning_rate": 1.7140068980189943e-05, |
| "loss": 0.0855, |
| "mean_token_accuracy": 0.9739682152867317, |
| "step": 1019 |
| }, |
| { |
| "epoch": 5.967741935483871, |
| "grad_norm": 0.5823469676430719, |
| "learning_rate": 1.710705869974583e-05, |
| "loss": 0.081, |
| "mean_token_accuracy": 0.9755722358822823, |
| "step": 1020 |
| }, |
| { |
| "epoch": 5.973607038123167, |
| "grad_norm": 0.5279782610228617, |
| "learning_rate": 1.7074066178702512e-05, |
| "loss": 0.0579, |
| "mean_token_accuracy": 0.980400986969471, |
| "step": 1021 |
| }, |
| { |
| "epoch": 5.979472140762463, |
| "grad_norm": 0.5112568511100889, |
| "learning_rate": 1.7041091536809506e-05, |
| "loss": 0.0688, |
| "mean_token_accuracy": 0.9795641005039215, |
| "step": 1022 |
| }, |
| { |
| "epoch": 5.9853372434017595, |
| "grad_norm": 0.5481954671151484, |
| "learning_rate": 1.7008134893751446e-05, |
| "loss": 0.0703, |
| "mean_token_accuracy": 0.980062872171402, |
| "step": 1023 |
| }, |
| { |
| "epoch": 5.991202346041056, |
| "grad_norm": 0.5077431608410231, |
| "learning_rate": 1.697519636914765e-05, |
| "loss": 0.0627, |
| "mean_token_accuracy": 0.9807908609509468, |
| "step": 1024 |
| }, |
| { |
| "epoch": 5.997067448680352, |
| "grad_norm": 0.5942347658987599, |
| "learning_rate": 1.6942276082551634e-05, |
| "loss": 0.0818, |
| "mean_token_accuracy": 0.9745178669691086, |
| "step": 1025 |
| }, |
| { |
| "epoch": 6.0, |
| "grad_norm": 0.9379871683914409, |
| "learning_rate": 1.6909374153450762e-05, |
| "loss": 0.0758, |
| "mean_token_accuracy": 0.9805418103933334, |
| "step": 1026 |
| }, |
| { |
| "epoch": 6.005865102639296, |
| "grad_norm": 0.41625170627035957, |
| "learning_rate": 1.6876490701265736e-05, |
| "loss": 0.0557, |
| "mean_token_accuracy": 0.9833519533276558, |
| "step": 1027 |
| }, |
| { |
| "epoch": 6.011730205278592, |
| "grad_norm": 0.5625854280142911, |
| "learning_rate": 1.684362584535022e-05, |
| "loss": 0.0651, |
| "mean_token_accuracy": 0.980522520840168, |
| "step": 1028 |
| }, |
| { |
| "epoch": 6.0175953079178885, |
| "grad_norm": 0.4589596228148454, |
| "learning_rate": 1.6810779704990358e-05, |
| "loss": 0.064, |
| "mean_token_accuracy": 0.980443462729454, |
| "step": 1029 |
| }, |
| { |
| "epoch": 6.023460410557185, |
| "grad_norm": 0.4710321685654739, |
| "learning_rate": 1.677795239940438e-05, |
| "loss": 0.0526, |
| "mean_token_accuracy": 0.9833626300096512, |
| "step": 1030 |
| }, |
| { |
| "epoch": 6.029325513196481, |
| "grad_norm": 0.47115827056288706, |
| "learning_rate": 1.674514404774214e-05, |
| "loss": 0.0682, |
| "mean_token_accuracy": 0.9806164056062698, |
| "step": 1031 |
| }, |
| { |
| "epoch": 6.035190615835777, |
| "grad_norm": 0.5007221500943095, |
| "learning_rate": 1.671235476908471e-05, |
| "loss": 0.0638, |
| "mean_token_accuracy": 0.9794806391000748, |
| "step": 1032 |
| }, |
| { |
| "epoch": 6.041055718475073, |
| "grad_norm": 0.46139880948373124, |
| "learning_rate": 1.6679584682443924e-05, |
| "loss": 0.059, |
| "mean_token_accuracy": 0.9817759990692139, |
| "step": 1033 |
| }, |
| { |
| "epoch": 6.0469208211143695, |
| "grad_norm": 0.47833827548137275, |
| "learning_rate": 1.6646833906761965e-05, |
| "loss": 0.061, |
| "mean_token_accuracy": 0.9800918996334076, |
| "step": 1034 |
| }, |
| { |
| "epoch": 6.052785923753666, |
| "grad_norm": 0.4157611090168959, |
| "learning_rate": 1.661410256091092e-05, |
| "loss": 0.0576, |
| "mean_token_accuracy": 0.9824439659714699, |
| "step": 1035 |
| }, |
| { |
| "epoch": 6.058651026392962, |
| "grad_norm": 0.5513319881808698, |
| "learning_rate": 1.658139076369236e-05, |
| "loss": 0.0726, |
| "mean_token_accuracy": 0.9797214195132256, |
| "step": 1036 |
| }, |
| { |
| "epoch": 6.064516129032258, |
| "grad_norm": 0.5997799870227748, |
| "learning_rate": 1.6548698633836893e-05, |
| "loss": 0.0641, |
| "mean_token_accuracy": 0.9777173176407814, |
| "step": 1037 |
| }, |
| { |
| "epoch": 6.070381231671554, |
| "grad_norm": 0.47333625171664667, |
| "learning_rate": 1.6516026290003746e-05, |
| "loss": 0.0584, |
| "mean_token_accuracy": 0.9834053292870522, |
| "step": 1038 |
| }, |
| { |
| "epoch": 6.07624633431085, |
| "grad_norm": 0.4424032802775701, |
| "learning_rate": 1.6483373850780328e-05, |
| "loss": 0.0613, |
| "mean_token_accuracy": 0.9812067598104477, |
| "step": 1039 |
| }, |
| { |
| "epoch": 6.0821114369501466, |
| "grad_norm": 0.3632789659233089, |
| "learning_rate": 1.645074143468181e-05, |
| "loss": 0.055, |
| "mean_token_accuracy": 0.9817801341414452, |
| "step": 1040 |
| }, |
| { |
| "epoch": 6.087976539589443, |
| "grad_norm": 0.6360564965039187, |
| "learning_rate": 1.6418129160150692e-05, |
| "loss": 0.07, |
| "mean_token_accuracy": 0.978939987719059, |
| "step": 1041 |
| }, |
| { |
| "epoch": 6.093841642228739, |
| "grad_norm": 0.42344045495487054, |
| "learning_rate": 1.6385537145556346e-05, |
| "loss": 0.0548, |
| "mean_token_accuracy": 0.9843300357460976, |
| "step": 1042 |
| }, |
| { |
| "epoch": 6.099706744868035, |
| "grad_norm": 0.45984406695397945, |
| "learning_rate": 1.6352965509194634e-05, |
| "loss": 0.0556, |
| "mean_token_accuracy": 0.9822628200054169, |
| "step": 1043 |
| }, |
| { |
| "epoch": 6.105571847507331, |
| "grad_norm": 0.4323284440143135, |
| "learning_rate": 1.6320414369287427e-05, |
| "loss": 0.0557, |
| "mean_token_accuracy": 0.9819125235080719, |
| "step": 1044 |
| }, |
| { |
| "epoch": 6.1114369501466275, |
| "grad_norm": 0.47646310093981065, |
| "learning_rate": 1.6287883843982223e-05, |
| "loss": 0.063, |
| "mean_token_accuracy": 0.9816362336277962, |
| "step": 1045 |
| }, |
| { |
| "epoch": 6.117302052785924, |
| "grad_norm": 0.6185661703570164, |
| "learning_rate": 1.625537405135169e-05, |
| "loss": 0.0797, |
| "mean_token_accuracy": 0.9750150516629219, |
| "step": 1046 |
| }, |
| { |
| "epoch": 6.12316715542522, |
| "grad_norm": 0.556525046678587, |
| "learning_rate": 1.622288510939325e-05, |
| "loss": 0.0678, |
| "mean_token_accuracy": 0.9790779277682304, |
| "step": 1047 |
| }, |
| { |
| "epoch": 6.129032258064516, |
| "grad_norm": 0.6626182528269104, |
| "learning_rate": 1.619041713602864e-05, |
| "loss": 0.0774, |
| "mean_token_accuracy": 0.9791093915700912, |
| "step": 1048 |
| }, |
| { |
| "epoch": 6.134897360703812, |
| "grad_norm": 0.5662890345517463, |
| "learning_rate": 1.6157970249103484e-05, |
| "loss": 0.0694, |
| "mean_token_accuracy": 0.9782344624400139, |
| "step": 1049 |
| }, |
| { |
| "epoch": 6.140762463343108, |
| "grad_norm": 0.5645945744859859, |
| "learning_rate": 1.612554456638688e-05, |
| "loss": 0.0721, |
| "mean_token_accuracy": 0.9771447703242302, |
| "step": 1050 |
| }, |
| { |
| "epoch": 6.146627565982405, |
| "grad_norm": 0.6274940318220598, |
| "learning_rate": 1.6093140205570962e-05, |
| "loss": 0.0753, |
| "mean_token_accuracy": 0.976951114833355, |
| "step": 1051 |
| }, |
| { |
| "epoch": 6.152492668621701, |
| "grad_norm": 0.4898064028531864, |
| "learning_rate": 1.6060757284270474e-05, |
| "loss": 0.0724, |
| "mean_token_accuracy": 0.9772902429103851, |
| "step": 1052 |
| }, |
| { |
| "epoch": 6.158357771260997, |
| "grad_norm": 0.4672713017679259, |
| "learning_rate": 1.6028395920022336e-05, |
| "loss": 0.0549, |
| "mean_token_accuracy": 0.9808409512042999, |
| "step": 1053 |
| }, |
| { |
| "epoch": 6.164222873900293, |
| "grad_norm": 0.5108336954770324, |
| "learning_rate": 1.5996056230285237e-05, |
| "loss": 0.0622, |
| "mean_token_accuracy": 0.9801520705223083, |
| "step": 1054 |
| }, |
| { |
| "epoch": 6.170087976539589, |
| "grad_norm": 0.41505936098873364, |
| "learning_rate": 1.596373833243918e-05, |
| "loss": 0.0617, |
| "mean_token_accuracy": 0.978906974196434, |
| "step": 1055 |
| }, |
| { |
| "epoch": 6.1759530791788855, |
| "grad_norm": 0.6576675728809507, |
| "learning_rate": 1.593144234378509e-05, |
| "loss": 0.0687, |
| "mean_token_accuracy": 0.978096179664135, |
| "step": 1056 |
| }, |
| { |
| "epoch": 6.181818181818182, |
| "grad_norm": 0.4119939746598458, |
| "learning_rate": 1.5899168381544362e-05, |
| "loss": 0.0592, |
| "mean_token_accuracy": 0.9820670709013939, |
| "step": 1057 |
| }, |
| { |
| "epoch": 6.187683284457478, |
| "grad_norm": 0.38406056548322093, |
| "learning_rate": 1.5866916562858444e-05, |
| "loss": 0.0579, |
| "mean_token_accuracy": 0.9805167242884636, |
| "step": 1058 |
| }, |
| { |
| "epoch": 6.193548387096774, |
| "grad_norm": 0.42331392587902517, |
| "learning_rate": 1.5834687004788406e-05, |
| "loss": 0.0636, |
| "mean_token_accuracy": 0.9793353825807571, |
| "step": 1059 |
| }, |
| { |
| "epoch": 6.19941348973607, |
| "grad_norm": 0.5444805201415094, |
| "learning_rate": 1.5802479824314537e-05, |
| "loss": 0.0663, |
| "mean_token_accuracy": 0.9753135293722153, |
| "step": 1060 |
| }, |
| { |
| "epoch": 6.205278592375366, |
| "grad_norm": 0.5768143603726456, |
| "learning_rate": 1.5770295138335896e-05, |
| "loss": 0.0633, |
| "mean_token_accuracy": 0.9811239168047905, |
| "step": 1061 |
| }, |
| { |
| "epoch": 6.211143695014663, |
| "grad_norm": 0.5086983780677398, |
| "learning_rate": 1.573813306366988e-05, |
| "loss": 0.0591, |
| "mean_token_accuracy": 0.9830747321248055, |
| "step": 1062 |
| }, |
| { |
| "epoch": 6.217008797653959, |
| "grad_norm": 0.49230527321663736, |
| "learning_rate": 1.5705993717051838e-05, |
| "loss": 0.0721, |
| "mean_token_accuracy": 0.976339653134346, |
| "step": 1063 |
| }, |
| { |
| "epoch": 6.222873900293255, |
| "grad_norm": 0.6986236541493921, |
| "learning_rate": 1.567387721513462e-05, |
| "loss": 0.0698, |
| "mean_token_accuracy": 0.9772974252700806, |
| "step": 1064 |
| }, |
| { |
| "epoch": 6.228739002932551, |
| "grad_norm": 0.42866239632289127, |
| "learning_rate": 1.5641783674488155e-05, |
| "loss": 0.0613, |
| "mean_token_accuracy": 0.9813599810004234, |
| "step": 1065 |
| }, |
| { |
| "epoch": 6.234604105571847, |
| "grad_norm": 0.3950681428606381, |
| "learning_rate": 1.5609713211599035e-05, |
| "loss": 0.0691, |
| "mean_token_accuracy": 0.9793985933065414, |
| "step": 1066 |
| }, |
| { |
| "epoch": 6.2404692082111435, |
| "grad_norm": 0.49277456950892906, |
| "learning_rate": 1.557766594287009e-05, |
| "loss": 0.0722, |
| "mean_token_accuracy": 0.9784559234976768, |
| "step": 1067 |
| }, |
| { |
| "epoch": 6.24633431085044, |
| "grad_norm": 0.5974984569237553, |
| "learning_rate": 1.554564198461996e-05, |
| "loss": 0.0773, |
| "mean_token_accuracy": 0.9730274602770805, |
| "step": 1068 |
| }, |
| { |
| "epoch": 6.252199413489736, |
| "grad_norm": 0.5298257752867717, |
| "learning_rate": 1.5513641453082672e-05, |
| "loss": 0.0652, |
| "mean_token_accuracy": 0.9812508746981621, |
| "step": 1069 |
| }, |
| { |
| "epoch": 6.258064516129032, |
| "grad_norm": 0.4929415822759338, |
| "learning_rate": 1.5481664464407246e-05, |
| "loss": 0.0619, |
| "mean_token_accuracy": 0.9832079485058784, |
| "step": 1070 |
| }, |
| { |
| "epoch": 6.263929618768328, |
| "grad_norm": 0.45181732363229793, |
| "learning_rate": 1.5449711134657224e-05, |
| "loss": 0.0687, |
| "mean_token_accuracy": 0.9794114828109741, |
| "step": 1071 |
| }, |
| { |
| "epoch": 6.269794721407624, |
| "grad_norm": 0.4301733956357397, |
| "learning_rate": 1.5417781579810296e-05, |
| "loss": 0.0676, |
| "mean_token_accuracy": 0.9799632504582405, |
| "step": 1072 |
| }, |
| { |
| "epoch": 6.275659824046921, |
| "grad_norm": 0.45380792420397464, |
| "learning_rate": 1.5385875915757846e-05, |
| "loss": 0.0565, |
| "mean_token_accuracy": 0.9798811078071594, |
| "step": 1073 |
| }, |
| { |
| "epoch": 6.281524926686217, |
| "grad_norm": 0.4889342003194968, |
| "learning_rate": 1.535399425830456e-05, |
| "loss": 0.062, |
| "mean_token_accuracy": 0.9813030734658241, |
| "step": 1074 |
| }, |
| { |
| "epoch": 6.287390029325513, |
| "grad_norm": 0.49157956071263753, |
| "learning_rate": 1.5322136723167957e-05, |
| "loss": 0.0678, |
| "mean_token_accuracy": 0.9778005704283714, |
| "step": 1075 |
| }, |
| { |
| "epoch": 6.293255131964809, |
| "grad_norm": 0.36148981245399664, |
| "learning_rate": 1.5290303425978036e-05, |
| "loss": 0.0578, |
| "mean_token_accuracy": 0.9815500751137733, |
| "step": 1076 |
| }, |
| { |
| "epoch": 6.299120234604105, |
| "grad_norm": 0.5078662394006407, |
| "learning_rate": 1.525849448227681e-05, |
| "loss": 0.0649, |
| "mean_token_accuracy": 0.9803764596581459, |
| "step": 1077 |
| }, |
| { |
| "epoch": 6.3049853372434015, |
| "grad_norm": 0.44875417467973094, |
| "learning_rate": 1.5226710007517894e-05, |
| "loss": 0.0728, |
| "mean_token_accuracy": 0.977813683450222, |
| "step": 1078 |
| }, |
| { |
| "epoch": 6.310850439882698, |
| "grad_norm": 0.43324133716142355, |
| "learning_rate": 1.5194950117066097e-05, |
| "loss": 0.0594, |
| "mean_token_accuracy": 0.979973241686821, |
| "step": 1079 |
| }, |
| { |
| "epoch": 6.316715542521994, |
| "grad_norm": 0.4151240808065745, |
| "learning_rate": 1.5163214926196995e-05, |
| "loss": 0.0701, |
| "mean_token_accuracy": 0.9771018698811531, |
| "step": 1080 |
| }, |
| { |
| "epoch": 6.32258064516129, |
| "grad_norm": 0.4555302943525885, |
| "learning_rate": 1.5131504550096515e-05, |
| "loss": 0.0669, |
| "mean_token_accuracy": 0.978136457502842, |
| "step": 1081 |
| }, |
| { |
| "epoch": 6.328445747800586, |
| "grad_norm": 0.6519430545614232, |
| "learning_rate": 1.5099819103860504e-05, |
| "loss": 0.0629, |
| "mean_token_accuracy": 0.9802589863538742, |
| "step": 1082 |
| }, |
| { |
| "epoch": 6.334310850439882, |
| "grad_norm": 0.4967967194966807, |
| "learning_rate": 1.5068158702494348e-05, |
| "loss": 0.0574, |
| "mean_token_accuracy": 0.9817872196435928, |
| "step": 1083 |
| }, |
| { |
| "epoch": 6.340175953079179, |
| "grad_norm": 0.44345834591558786, |
| "learning_rate": 1.5036523460912511e-05, |
| "loss": 0.0564, |
| "mean_token_accuracy": 0.9838694632053375, |
| "step": 1084 |
| }, |
| { |
| "epoch": 6.346041055718475, |
| "grad_norm": 0.4715058201780604, |
| "learning_rate": 1.5004913493938147e-05, |
| "loss": 0.0634, |
| "mean_token_accuracy": 0.9793806448578835, |
| "step": 1085 |
| }, |
| { |
| "epoch": 6.351906158357771, |
| "grad_norm": 0.5443439102135127, |
| "learning_rate": 1.4973328916302667e-05, |
| "loss": 0.0737, |
| "mean_token_accuracy": 0.9753685146570206, |
| "step": 1086 |
| }, |
| { |
| "epoch": 6.357771260997067, |
| "grad_norm": 0.5384867941568995, |
| "learning_rate": 1.4941769842645335e-05, |
| "loss": 0.0658, |
| "mean_token_accuracy": 0.9772131741046906, |
| "step": 1087 |
| }, |
| { |
| "epoch": 6.363636363636363, |
| "grad_norm": 0.4575840235851356, |
| "learning_rate": 1.4910236387512837e-05, |
| "loss": 0.0635, |
| "mean_token_accuracy": 0.9794055670499802, |
| "step": 1088 |
| }, |
| { |
| "epoch": 6.3695014662756595, |
| "grad_norm": 0.40669986519786894, |
| "learning_rate": 1.487872866535888e-05, |
| "loss": 0.0586, |
| "mean_token_accuracy": 0.9820075482130051, |
| "step": 1089 |
| }, |
| { |
| "epoch": 6.375366568914956, |
| "grad_norm": 0.5175296953254443, |
| "learning_rate": 1.4847246790543773e-05, |
| "loss": 0.0618, |
| "mean_token_accuracy": 0.9794023782014847, |
| "step": 1090 |
| }, |
| { |
| "epoch": 6.381231671554252, |
| "grad_norm": 0.4715228486396621, |
| "learning_rate": 1.4815790877334007e-05, |
| "loss": 0.0566, |
| "mean_token_accuracy": 0.9806531295180321, |
| "step": 1091 |
| }, |
| { |
| "epoch": 6.387096774193548, |
| "grad_norm": 0.5288849918324824, |
| "learning_rate": 1.4784361039901844e-05, |
| "loss": 0.0684, |
| "mean_token_accuracy": 0.9798084422945976, |
| "step": 1092 |
| }, |
| { |
| "epoch": 6.392961876832844, |
| "grad_norm": 0.4773056484959844, |
| "learning_rate": 1.47529573923249e-05, |
| "loss": 0.0602, |
| "mean_token_accuracy": 0.9808967262506485, |
| "step": 1093 |
| }, |
| { |
| "epoch": 6.39882697947214, |
| "grad_norm": 0.5215802713663142, |
| "learning_rate": 1.472158004858573e-05, |
| "loss": 0.0623, |
| "mean_token_accuracy": 0.9784137681126595, |
| "step": 1094 |
| }, |
| { |
| "epoch": 6.404692082111437, |
| "grad_norm": 0.594013580780295, |
| "learning_rate": 1.4690229122571419e-05, |
| "loss": 0.0749, |
| "mean_token_accuracy": 0.9752750173211098, |
| "step": 1095 |
| }, |
| { |
| "epoch": 6.410557184750733, |
| "grad_norm": 0.45076680417605564, |
| "learning_rate": 1.4658904728073169e-05, |
| "loss": 0.0579, |
| "mean_token_accuracy": 0.9810296148061752, |
| "step": 1096 |
| }, |
| { |
| "epoch": 6.416422287390029, |
| "grad_norm": 0.4686177977484101, |
| "learning_rate": 1.4627606978785878e-05, |
| "loss": 0.0653, |
| "mean_token_accuracy": 0.9801298379898071, |
| "step": 1097 |
| }, |
| { |
| "epoch": 6.422287390029325, |
| "grad_norm": 0.473498323409717, |
| "learning_rate": 1.4596335988307736e-05, |
| "loss": 0.0715, |
| "mean_token_accuracy": 0.9790318608283997, |
| "step": 1098 |
| }, |
| { |
| "epoch": 6.428152492668621, |
| "grad_norm": 0.3829985516560962, |
| "learning_rate": 1.4565091870139814e-05, |
| "loss": 0.0544, |
| "mean_token_accuracy": 0.9823189005255699, |
| "step": 1099 |
| }, |
| { |
| "epoch": 6.4340175953079175, |
| "grad_norm": 0.6985399778014056, |
| "learning_rate": 1.4533874737685638e-05, |
| "loss": 0.0832, |
| "mean_token_accuracy": 0.9754642993211746, |
| "step": 1100 |
| }, |
| { |
| "epoch": 6.439882697947214, |
| "grad_norm": 0.47408544720881507, |
| "learning_rate": 1.450268470425079e-05, |
| "loss": 0.0654, |
| "mean_token_accuracy": 0.9818674698472023, |
| "step": 1101 |
| }, |
| { |
| "epoch": 6.44574780058651, |
| "grad_norm": 0.4132311752624299, |
| "learning_rate": 1.4471521883042492e-05, |
| "loss": 0.0595, |
| "mean_token_accuracy": 0.9819449707865715, |
| "step": 1102 |
| }, |
| { |
| "epoch": 6.451612903225806, |
| "grad_norm": 0.5282825996755145, |
| "learning_rate": 1.4440386387169207e-05, |
| "loss": 0.0676, |
| "mean_token_accuracy": 0.9805441722273827, |
| "step": 1103 |
| }, |
| { |
| "epoch": 6.457478005865102, |
| "grad_norm": 0.5093444055532642, |
| "learning_rate": 1.4409278329640218e-05, |
| "loss": 0.0696, |
| "mean_token_accuracy": 0.9782739505171776, |
| "step": 1104 |
| }, |
| { |
| "epoch": 6.463343108504398, |
| "grad_norm": 0.40896293549583035, |
| "learning_rate": 1.4378197823365186e-05, |
| "loss": 0.0653, |
| "mean_token_accuracy": 0.980708159506321, |
| "step": 1105 |
| }, |
| { |
| "epoch": 6.469208211143695, |
| "grad_norm": 0.5368030574271555, |
| "learning_rate": 1.4347144981153807e-05, |
| "loss": 0.0763, |
| "mean_token_accuracy": 0.9757898151874542, |
| "step": 1106 |
| }, |
| { |
| "epoch": 6.475073313782991, |
| "grad_norm": 0.33963125667874944, |
| "learning_rate": 1.4316119915715363e-05, |
| "loss": 0.0552, |
| "mean_token_accuracy": 0.9820173308253288, |
| "step": 1107 |
| }, |
| { |
| "epoch": 6.480938416422287, |
| "grad_norm": 0.5695780400518249, |
| "learning_rate": 1.42851227396583e-05, |
| "loss": 0.0724, |
| "mean_token_accuracy": 0.9782184883952141, |
| "step": 1108 |
| }, |
| { |
| "epoch": 6.486803519061583, |
| "grad_norm": 0.4686135496909153, |
| "learning_rate": 1.4254153565489861e-05, |
| "loss": 0.0694, |
| "mean_token_accuracy": 0.9768488556146622, |
| "step": 1109 |
| }, |
| { |
| "epoch": 6.492668621700879, |
| "grad_norm": 0.4444438012165556, |
| "learning_rate": 1.4223212505615634e-05, |
| "loss": 0.0636, |
| "mean_token_accuracy": 0.981613002717495, |
| "step": 1110 |
| }, |
| { |
| "epoch": 6.4985337243401755, |
| "grad_norm": 0.4325209107027411, |
| "learning_rate": 1.4192299672339167e-05, |
| "loss": 0.0583, |
| "mean_token_accuracy": 0.9803315699100494, |
| "step": 1111 |
| }, |
| { |
| "epoch": 6.504398826979472, |
| "grad_norm": 0.5013136187861221, |
| "learning_rate": 1.4161415177861568e-05, |
| "loss": 0.0656, |
| "mean_token_accuracy": 0.9776363521814346, |
| "step": 1112 |
| }, |
| { |
| "epoch": 6.510263929618768, |
| "grad_norm": 0.3588687094600502, |
| "learning_rate": 1.4130559134281074e-05, |
| "loss": 0.0566, |
| "mean_token_accuracy": 0.9824674054980278, |
| "step": 1113 |
| }, |
| { |
| "epoch": 6.516129032258064, |
| "grad_norm": 0.5881958388687754, |
| "learning_rate": 1.4099731653592668e-05, |
| "loss": 0.0662, |
| "mean_token_accuracy": 0.9796800762414932, |
| "step": 1114 |
| }, |
| { |
| "epoch": 6.52199413489736, |
| "grad_norm": 0.5935746416719274, |
| "learning_rate": 1.406893284768764e-05, |
| "loss": 0.0772, |
| "mean_token_accuracy": 0.9757950976490974, |
| "step": 1115 |
| }, |
| { |
| "epoch": 6.527859237536656, |
| "grad_norm": 0.6106151329018146, |
| "learning_rate": 1.4038162828353223e-05, |
| "loss": 0.0767, |
| "mean_token_accuracy": 0.9741943404078484, |
| "step": 1116 |
| }, |
| { |
| "epoch": 6.533724340175953, |
| "grad_norm": 0.5190014325868866, |
| "learning_rate": 1.4007421707272167e-05, |
| "loss": 0.0673, |
| "mean_token_accuracy": 0.9787988364696503, |
| "step": 1117 |
| }, |
| { |
| "epoch": 6.539589442815249, |
| "grad_norm": 0.47127351993031963, |
| "learning_rate": 1.3976709596022313e-05, |
| "loss": 0.0648, |
| "mean_token_accuracy": 0.9789974242448807, |
| "step": 1118 |
| }, |
| { |
| "epoch": 6.545454545454545, |
| "grad_norm": 0.5045724179125629, |
| "learning_rate": 1.3946026606076232e-05, |
| "loss": 0.0682, |
| "mean_token_accuracy": 0.9812200665473938, |
| "step": 1119 |
| }, |
| { |
| "epoch": 6.551319648093841, |
| "grad_norm": 0.5047482899527544, |
| "learning_rate": 1.3915372848800784e-05, |
| "loss": 0.0631, |
| "mean_token_accuracy": 0.981167197227478, |
| "step": 1120 |
| }, |
| { |
| "epoch": 6.557184750733137, |
| "grad_norm": 0.4417462758826341, |
| "learning_rate": 1.388474843545672e-05, |
| "loss": 0.0552, |
| "mean_token_accuracy": 0.9817292094230652, |
| "step": 1121 |
| }, |
| { |
| "epoch": 6.563049853372434, |
| "grad_norm": 0.465911409594195, |
| "learning_rate": 1.3854153477198305e-05, |
| "loss": 0.0776, |
| "mean_token_accuracy": 0.9728592708706856, |
| "step": 1122 |
| }, |
| { |
| "epoch": 6.568914956011731, |
| "grad_norm": 0.3971582032508505, |
| "learning_rate": 1.3823588085072865e-05, |
| "loss": 0.0595, |
| "mean_token_accuracy": 0.9787317886948586, |
| "step": 1123 |
| }, |
| { |
| "epoch": 6.574780058651027, |
| "grad_norm": 0.5127794810231362, |
| "learning_rate": 1.3793052370020441e-05, |
| "loss": 0.0732, |
| "mean_token_accuracy": 0.9790031611919403, |
| "step": 1124 |
| }, |
| { |
| "epoch": 6.580645161290323, |
| "grad_norm": 0.5745357026449952, |
| "learning_rate": 1.3762546442873343e-05, |
| "loss": 0.0706, |
| "mean_token_accuracy": 0.9805065914988518, |
| "step": 1125 |
| }, |
| { |
| "epoch": 6.586510263929619, |
| "grad_norm": 0.5035709411175152, |
| "learning_rate": 1.3732070414355766e-05, |
| "loss": 0.069, |
| "mean_token_accuracy": 0.9779914170503616, |
| "step": 1126 |
| }, |
| { |
| "epoch": 6.592375366568915, |
| "grad_norm": 0.49677571366761347, |
| "learning_rate": 1.370162439508339e-05, |
| "loss": 0.0628, |
| "mean_token_accuracy": 0.9810082614421844, |
| "step": 1127 |
| }, |
| { |
| "epoch": 6.5982404692082115, |
| "grad_norm": 0.4300492329897516, |
| "learning_rate": 1.367120849556296e-05, |
| "loss": 0.0628, |
| "mean_token_accuracy": 0.9806487932801247, |
| "step": 1128 |
| }, |
| { |
| "epoch": 6.604105571847508, |
| "grad_norm": 0.3446649815945842, |
| "learning_rate": 1.3640822826191907e-05, |
| "loss": 0.0504, |
| "mean_token_accuracy": 0.9841926470398903, |
| "step": 1129 |
| }, |
| { |
| "epoch": 6.609970674486804, |
| "grad_norm": 0.49179030281896613, |
| "learning_rate": 1.361046749725794e-05, |
| "loss": 0.0683, |
| "mean_token_accuracy": 0.9768203571438789, |
| "step": 1130 |
| }, |
| { |
| "epoch": 6.6158357771261, |
| "grad_norm": 0.3978124224369657, |
| "learning_rate": 1.3580142618938647e-05, |
| "loss": 0.0538, |
| "mean_token_accuracy": 0.9839759469032288, |
| "step": 1131 |
| }, |
| { |
| "epoch": 6.621700879765396, |
| "grad_norm": 0.4851032507862954, |
| "learning_rate": 1.354984830130109e-05, |
| "loss": 0.0703, |
| "mean_token_accuracy": 0.9769893512129784, |
| "step": 1132 |
| }, |
| { |
| "epoch": 6.627565982404692, |
| "grad_norm": 0.46896793554361244, |
| "learning_rate": 1.3519584654301401e-05, |
| "loss": 0.0669, |
| "mean_token_accuracy": 0.979073740541935, |
| "step": 1133 |
| }, |
| { |
| "epoch": 6.633431085043989, |
| "grad_norm": 0.4433020660589846, |
| "learning_rate": 1.3489351787784398e-05, |
| "loss": 0.0641, |
| "mean_token_accuracy": 0.9796401932835579, |
| "step": 1134 |
| }, |
| { |
| "epoch": 6.639296187683285, |
| "grad_norm": 0.6547657796915215, |
| "learning_rate": 1.3459149811483178e-05, |
| "loss": 0.0826, |
| "mean_token_accuracy": 0.9772631600499153, |
| "step": 1135 |
| }, |
| { |
| "epoch": 6.645161290322581, |
| "grad_norm": 0.6772433811470678, |
| "learning_rate": 1.342897883501872e-05, |
| "loss": 0.0732, |
| "mean_token_accuracy": 0.9791544526815414, |
| "step": 1136 |
| }, |
| { |
| "epoch": 6.651026392961877, |
| "grad_norm": 0.5203169937735215, |
| "learning_rate": 1.3398838967899477e-05, |
| "loss": 0.0642, |
| "mean_token_accuracy": 0.978813648223877, |
| "step": 1137 |
| }, |
| { |
| "epoch": 6.656891495601173, |
| "grad_norm": 0.42941729523725874, |
| "learning_rate": 1.3368730319520992e-05, |
| "loss": 0.0642, |
| "mean_token_accuracy": 0.9808278232812881, |
| "step": 1138 |
| }, |
| { |
| "epoch": 6.6627565982404695, |
| "grad_norm": 0.4520583507512642, |
| "learning_rate": 1.3338652999165511e-05, |
| "loss": 0.0672, |
| "mean_token_accuracy": 0.9790240898728371, |
| "step": 1139 |
| }, |
| { |
| "epoch": 6.668621700879766, |
| "grad_norm": 0.3642854711728884, |
| "learning_rate": 1.3308607116001549e-05, |
| "loss": 0.0572, |
| "mean_token_accuracy": 0.98244908452034, |
| "step": 1140 |
| }, |
| { |
| "epoch": 6.674486803519062, |
| "grad_norm": 0.521170844330364, |
| "learning_rate": 1.3278592779083534e-05, |
| "loss": 0.0565, |
| "mean_token_accuracy": 0.983853779733181, |
| "step": 1141 |
| }, |
| { |
| "epoch": 6.680351906158358, |
| "grad_norm": 0.39924193832736776, |
| "learning_rate": 1.324861009735138e-05, |
| "loss": 0.0607, |
| "mean_token_accuracy": 0.9806713908910751, |
| "step": 1142 |
| }, |
| { |
| "epoch": 6.686217008797654, |
| "grad_norm": 0.4039864199512111, |
| "learning_rate": 1.3218659179630112e-05, |
| "loss": 0.0645, |
| "mean_token_accuracy": 0.9802507907152176, |
| "step": 1143 |
| }, |
| { |
| "epoch": 6.69208211143695, |
| "grad_norm": 0.5196436019363877, |
| "learning_rate": 1.3188740134629469e-05, |
| "loss": 0.0665, |
| "mean_token_accuracy": 0.9800485447049141, |
| "step": 1144 |
| }, |
| { |
| "epoch": 6.697947214076247, |
| "grad_norm": 0.42089828963999687, |
| "learning_rate": 1.3158853070943499e-05, |
| "loss": 0.0553, |
| "mean_token_accuracy": 0.9817090556025505, |
| "step": 1145 |
| }, |
| { |
| "epoch": 6.703812316715543, |
| "grad_norm": 0.5007957363563511, |
| "learning_rate": 1.3128998097050174e-05, |
| "loss": 0.0659, |
| "mean_token_accuracy": 0.9803542569279671, |
| "step": 1146 |
| }, |
| { |
| "epoch": 6.709677419354839, |
| "grad_norm": 0.5010376117207007, |
| "learning_rate": 1.3099175321310993e-05, |
| "loss": 0.066, |
| "mean_token_accuracy": 0.979380339384079, |
| "step": 1147 |
| }, |
| { |
| "epoch": 6.715542521994135, |
| "grad_norm": 0.3592596677655769, |
| "learning_rate": 1.3069384851970584e-05, |
| "loss": 0.0554, |
| "mean_token_accuracy": 0.9809225648641586, |
| "step": 1148 |
| }, |
| { |
| "epoch": 6.721407624633431, |
| "grad_norm": 0.47254547050064377, |
| "learning_rate": 1.3039626797156321e-05, |
| "loss": 0.0613, |
| "mean_token_accuracy": 0.9800109416246414, |
| "step": 1149 |
| }, |
| { |
| "epoch": 6.7272727272727275, |
| "grad_norm": 0.40129886920431546, |
| "learning_rate": 1.3009901264877924e-05, |
| "loss": 0.0588, |
| "mean_token_accuracy": 0.9832498729228973, |
| "step": 1150 |
| }, |
| { |
| "epoch": 6.733137829912024, |
| "grad_norm": 0.3962732126618426, |
| "learning_rate": 1.298020836302707e-05, |
| "loss": 0.0617, |
| "mean_token_accuracy": 0.9801494553685188, |
| "step": 1151 |
| }, |
| { |
| "epoch": 6.73900293255132, |
| "grad_norm": 0.4094241439229563, |
| "learning_rate": 1.2950548199376999e-05, |
| "loss": 0.0576, |
| "mean_token_accuracy": 0.9825976863503456, |
| "step": 1152 |
| }, |
| { |
| "epoch": 6.744868035190616, |
| "grad_norm": 0.5378068116582913, |
| "learning_rate": 1.292092088158213e-05, |
| "loss": 0.0676, |
| "mean_token_accuracy": 0.9790525138378143, |
| "step": 1153 |
| }, |
| { |
| "epoch": 6.750733137829912, |
| "grad_norm": 0.3968231076570657, |
| "learning_rate": 1.2891326517177663e-05, |
| "loss": 0.056, |
| "mean_token_accuracy": 0.9852932840585709, |
| "step": 1154 |
| }, |
| { |
| "epoch": 6.756598240469208, |
| "grad_norm": 0.48590510329215825, |
| "learning_rate": 1.2861765213579177e-05, |
| "loss": 0.069, |
| "mean_token_accuracy": 0.9758260548114777, |
| "step": 1155 |
| }, |
| { |
| "epoch": 6.762463343108505, |
| "grad_norm": 0.4499456893763865, |
| "learning_rate": 1.2832237078082272e-05, |
| "loss": 0.0588, |
| "mean_token_accuracy": 0.9807997494935989, |
| "step": 1156 |
| }, |
| { |
| "epoch": 6.768328445747801, |
| "grad_norm": 0.4467830869566874, |
| "learning_rate": 1.2802742217862156e-05, |
| "loss": 0.0659, |
| "mean_token_accuracy": 0.9799171090126038, |
| "step": 1157 |
| }, |
| { |
| "epoch": 6.774193548387097, |
| "grad_norm": 0.4405266054873109, |
| "learning_rate": 1.2773280739973255e-05, |
| "loss": 0.0671, |
| "mean_token_accuracy": 0.977348655462265, |
| "step": 1158 |
| }, |
| { |
| "epoch": 6.780058651026393, |
| "grad_norm": 0.400408649403103, |
| "learning_rate": 1.2743852751348833e-05, |
| "loss": 0.0553, |
| "mean_token_accuracy": 0.9837123081088066, |
| "step": 1159 |
| }, |
| { |
| "epoch": 6.785923753665689, |
| "grad_norm": 0.3887758632837316, |
| "learning_rate": 1.2714458358800612e-05, |
| "loss": 0.0471, |
| "mean_token_accuracy": 0.9850385040044785, |
| "step": 1160 |
| }, |
| { |
| "epoch": 6.7917888563049855, |
| "grad_norm": 0.5172676745480713, |
| "learning_rate": 1.2685097669018362e-05, |
| "loss": 0.0689, |
| "mean_token_accuracy": 0.9788277596235275, |
| "step": 1161 |
| }, |
| { |
| "epoch": 6.797653958944282, |
| "grad_norm": 0.432827905221815, |
| "learning_rate": 1.265577078856953e-05, |
| "loss": 0.0671, |
| "mean_token_accuracy": 0.9756351113319397, |
| "step": 1162 |
| }, |
| { |
| "epoch": 6.803519061583578, |
| "grad_norm": 0.5482725408821638, |
| "learning_rate": 1.2626477823898843e-05, |
| "loss": 0.072, |
| "mean_token_accuracy": 0.979312427341938, |
| "step": 1163 |
| }, |
| { |
| "epoch": 6.809384164222874, |
| "grad_norm": 0.5005950667187236, |
| "learning_rate": 1.2597218881327944e-05, |
| "loss": 0.0644, |
| "mean_token_accuracy": 0.9781336486339569, |
| "step": 1164 |
| }, |
| { |
| "epoch": 6.81524926686217, |
| "grad_norm": 0.3979272375106252, |
| "learning_rate": 1.2567994067054961e-05, |
| "loss": 0.0624, |
| "mean_token_accuracy": 0.9795544818043709, |
| "step": 1165 |
| }, |
| { |
| "epoch": 6.821114369501466, |
| "grad_norm": 0.47983624496521715, |
| "learning_rate": 1.2538803487154177e-05, |
| "loss": 0.0646, |
| "mean_token_accuracy": 0.9791703373193741, |
| "step": 1166 |
| }, |
| { |
| "epoch": 6.826979472140763, |
| "grad_norm": 0.4792233882496042, |
| "learning_rate": 1.25096472475756e-05, |
| "loss": 0.0643, |
| "mean_token_accuracy": 0.9790126904845238, |
| "step": 1167 |
| }, |
| { |
| "epoch": 6.832844574780059, |
| "grad_norm": 0.44047811584075663, |
| "learning_rate": 1.248052545414461e-05, |
| "loss": 0.0666, |
| "mean_token_accuracy": 0.9815377816557884, |
| "step": 1168 |
| }, |
| { |
| "epoch": 6.838709677419355, |
| "grad_norm": 0.45039377868298586, |
| "learning_rate": 1.2451438212561556e-05, |
| "loss": 0.0728, |
| "mean_token_accuracy": 0.9747223258018494, |
| "step": 1169 |
| }, |
| { |
| "epoch": 6.844574780058651, |
| "grad_norm": 0.437389204436552, |
| "learning_rate": 1.2422385628401377e-05, |
| "loss": 0.064, |
| "mean_token_accuracy": 0.9793856963515282, |
| "step": 1170 |
| }, |
| { |
| "epoch": 6.850439882697947, |
| "grad_norm": 0.488370718643388, |
| "learning_rate": 1.2393367807113217e-05, |
| "loss": 0.0658, |
| "mean_token_accuracy": 0.9805554449558258, |
| "step": 1171 |
| }, |
| { |
| "epoch": 6.8563049853372435, |
| "grad_norm": 0.39447639676794705, |
| "learning_rate": 1.236438485402005e-05, |
| "loss": 0.0632, |
| "mean_token_accuracy": 0.9815583750605583, |
| "step": 1172 |
| }, |
| { |
| "epoch": 6.86217008797654, |
| "grad_norm": 0.3808687072825325, |
| "learning_rate": 1.2335436874318293e-05, |
| "loss": 0.0593, |
| "mean_token_accuracy": 0.9811095669865608, |
| "step": 1173 |
| }, |
| { |
| "epoch": 6.868035190615836, |
| "grad_norm": 0.45599887703857295, |
| "learning_rate": 1.2306523973077416e-05, |
| "loss": 0.0714, |
| "mean_token_accuracy": 0.9796115532517433, |
| "step": 1174 |
| }, |
| { |
| "epoch": 6.873900293255132, |
| "grad_norm": 0.47569451377053196, |
| "learning_rate": 1.2277646255239572e-05, |
| "loss": 0.0694, |
| "mean_token_accuracy": 0.98045764118433, |
| "step": 1175 |
| }, |
| { |
| "epoch": 6.879765395894428, |
| "grad_norm": 0.46631060127689494, |
| "learning_rate": 1.2248803825619224e-05, |
| "loss": 0.0648, |
| "mean_token_accuracy": 0.9787806421518326, |
| "step": 1176 |
| }, |
| { |
| "epoch": 6.885630498533724, |
| "grad_norm": 0.6885890063590842, |
| "learning_rate": 1.2219996788902734e-05, |
| "loss": 0.0689, |
| "mean_token_accuracy": 0.978795975446701, |
| "step": 1177 |
| }, |
| { |
| "epoch": 6.891495601173021, |
| "grad_norm": 0.4303213452595518, |
| "learning_rate": 1.2191225249648016e-05, |
| "loss": 0.0602, |
| "mean_token_accuracy": 0.9798460155725479, |
| "step": 1178 |
| }, |
| { |
| "epoch": 6.897360703812317, |
| "grad_norm": 0.3937230216041198, |
| "learning_rate": 1.216248931228413e-05, |
| "loss": 0.0667, |
| "mean_token_accuracy": 0.977984793484211, |
| "step": 1179 |
| }, |
| { |
| "epoch": 6.903225806451613, |
| "grad_norm": 0.38130916152890315, |
| "learning_rate": 1.2133789081110927e-05, |
| "loss": 0.0595, |
| "mean_token_accuracy": 0.981584794819355, |
| "step": 1180 |
| }, |
| { |
| "epoch": 6.909090909090909, |
| "grad_norm": 0.46142872676320174, |
| "learning_rate": 1.2105124660298655e-05, |
| "loss": 0.0625, |
| "mean_token_accuracy": 0.9815917834639549, |
| "step": 1181 |
| }, |
| { |
| "epoch": 6.914956011730205, |
| "grad_norm": 0.43182919024308025, |
| "learning_rate": 1.2076496153887587e-05, |
| "loss": 0.0591, |
| "mean_token_accuracy": 0.9817899540066719, |
| "step": 1182 |
| }, |
| { |
| "epoch": 6.9208211143695015, |
| "grad_norm": 0.42635933383608365, |
| "learning_rate": 1.2047903665787633e-05, |
| "loss": 0.0618, |
| "mean_token_accuracy": 0.9816203713417053, |
| "step": 1183 |
| }, |
| { |
| "epoch": 6.926686217008798, |
| "grad_norm": 0.41330290477227893, |
| "learning_rate": 1.2019347299777981e-05, |
| "loss": 0.0557, |
| "mean_token_accuracy": 0.9830499663949013, |
| "step": 1184 |
| }, |
| { |
| "epoch": 6.932551319648094, |
| "grad_norm": 0.4565062990930812, |
| "learning_rate": 1.199082715950671e-05, |
| "loss": 0.071, |
| "mean_token_accuracy": 0.9775069504976273, |
| "step": 1185 |
| }, |
| { |
| "epoch": 6.93841642228739, |
| "grad_norm": 0.4137224208578769, |
| "learning_rate": 1.1962343348490407e-05, |
| "loss": 0.0578, |
| "mean_token_accuracy": 0.980716660618782, |
| "step": 1186 |
| }, |
| { |
| "epoch": 6.944281524926686, |
| "grad_norm": 0.3948613864092428, |
| "learning_rate": 1.1933895970113798e-05, |
| "loss": 0.0652, |
| "mean_token_accuracy": 0.9824345782399178, |
| "step": 1187 |
| }, |
| { |
| "epoch": 6.9501466275659824, |
| "grad_norm": 0.5122310403133853, |
| "learning_rate": 1.1905485127629387e-05, |
| "loss": 0.0709, |
| "mean_token_accuracy": 0.9782714620232582, |
| "step": 1188 |
| }, |
| { |
| "epoch": 6.956011730205279, |
| "grad_norm": 0.4604201900693111, |
| "learning_rate": 1.1877110924157046e-05, |
| "loss": 0.0612, |
| "mean_token_accuracy": 0.9789381250739098, |
| "step": 1189 |
| }, |
| { |
| "epoch": 6.961876832844575, |
| "grad_norm": 0.4017710192115634, |
| "learning_rate": 1.1848773462683684e-05, |
| "loss": 0.0673, |
| "mean_token_accuracy": 0.9794546961784363, |
| "step": 1190 |
| }, |
| { |
| "epoch": 6.967741935483871, |
| "grad_norm": 0.47930547919907945, |
| "learning_rate": 1.1820472846062842e-05, |
| "loss": 0.0628, |
| "mean_token_accuracy": 0.9796619564294815, |
| "step": 1191 |
| }, |
| { |
| "epoch": 6.973607038123167, |
| "grad_norm": 0.4406226699463579, |
| "learning_rate": 1.1792209177014317e-05, |
| "loss": 0.0625, |
| "mean_token_accuracy": 0.9820540770888329, |
| "step": 1192 |
| }, |
| { |
| "epoch": 6.979472140762463, |
| "grad_norm": 0.4166350298425514, |
| "learning_rate": 1.1763982558123823e-05, |
| "loss": 0.0643, |
| "mean_token_accuracy": 0.9803269580006599, |
| "step": 1193 |
| }, |
| { |
| "epoch": 6.9853372434017595, |
| "grad_norm": 0.5067924863320438, |
| "learning_rate": 1.1735793091842583e-05, |
| "loss": 0.0673, |
| "mean_token_accuracy": 0.9796130433678627, |
| "step": 1194 |
| }, |
| { |
| "epoch": 6.991202346041056, |
| "grad_norm": 0.5131689327156483, |
| "learning_rate": 1.1707640880486975e-05, |
| "loss": 0.0786, |
| "mean_token_accuracy": 0.9753068685531616, |
| "step": 1195 |
| }, |
| { |
| "epoch": 6.997067448680352, |
| "grad_norm": 0.27953688453676967, |
| "learning_rate": 1.1679526026238155e-05, |
| "loss": 0.0494, |
| "mean_token_accuracy": 0.9843882694840431, |
| "step": 1196 |
| }, |
| { |
| "epoch": 7.0, |
| "grad_norm": 0.27953688453676967, |
| "learning_rate": 1.165144863114169e-05, |
| "loss": 0.0556, |
| "mean_token_accuracy": 0.9817256480455399, |
| "step": 1197 |
| }, |
| { |
| "epoch": 7.005865102639296, |
| "grad_norm": 0.6373555062592029, |
| "learning_rate": 1.1623408797107185e-05, |
| "loss": 0.064, |
| "mean_token_accuracy": 0.9785372838377953, |
| "step": 1198 |
| }, |
| { |
| "epoch": 7.011730205278592, |
| "grad_norm": 0.43666510254787677, |
| "learning_rate": 1.1595406625907914e-05, |
| "loss": 0.0548, |
| "mean_token_accuracy": 0.9841993674635887, |
| "step": 1199 |
| }, |
| { |
| "epoch": 7.0175953079178885, |
| "grad_norm": 0.36290262869575624, |
| "learning_rate": 1.1567442219180446e-05, |
| "loss": 0.0528, |
| "mean_token_accuracy": 0.9837799668312073, |
| "step": 1200 |
| }, |
| { |
| "epoch": 7.023460410557185, |
| "grad_norm": 0.3421833694866547, |
| "learning_rate": 1.153951567842429e-05, |
| "loss": 0.0487, |
| "mean_token_accuracy": 0.9848127514123917, |
| "step": 1201 |
| }, |
| { |
| "epoch": 7.029325513196481, |
| "grad_norm": 0.33545395972804354, |
| "learning_rate": 1.1511627105001501e-05, |
| "loss": 0.0581, |
| "mean_token_accuracy": 0.9827064424753189, |
| "step": 1202 |
| }, |
| { |
| "epoch": 7.035190615835777, |
| "grad_norm": 0.45031796131371543, |
| "learning_rate": 1.1483776600136344e-05, |
| "loss": 0.0636, |
| "mean_token_accuracy": 0.9784261807799339, |
| "step": 1203 |
| }, |
| { |
| "epoch": 7.041055718475073, |
| "grad_norm": 0.43121710516783884, |
| "learning_rate": 1.1455964264914906e-05, |
| "loss": 0.0512, |
| "mean_token_accuracy": 0.9828193038702011, |
| "step": 1204 |
| }, |
| { |
| "epoch": 7.0469208211143695, |
| "grad_norm": 0.3947272877786806, |
| "learning_rate": 1.142819020028472e-05, |
| "loss": 0.0632, |
| "mean_token_accuracy": 0.9800242558121681, |
| "step": 1205 |
| }, |
| { |
| "epoch": 7.052785923753666, |
| "grad_norm": 0.4274474592379843, |
| "learning_rate": 1.140045450705443e-05, |
| "loss": 0.0564, |
| "mean_token_accuracy": 0.9798077195882797, |
| "step": 1206 |
| }, |
| { |
| "epoch": 7.058651026392962, |
| "grad_norm": 0.3834033075631306, |
| "learning_rate": 1.13727572858934e-05, |
| "loss": 0.0511, |
| "mean_token_accuracy": 0.983852356672287, |
| "step": 1207 |
| }, |
| { |
| "epoch": 7.064516129032258, |
| "grad_norm": 0.3655976301540036, |
| "learning_rate": 1.1345098637331356e-05, |
| "loss": 0.0508, |
| "mean_token_accuracy": 0.9838704839348793, |
| "step": 1208 |
| }, |
| { |
| "epoch": 7.070381231671554, |
| "grad_norm": 0.5229786475813798, |
| "learning_rate": 1.1317478661758022e-05, |
| "loss": 0.0712, |
| "mean_token_accuracy": 0.9760942086577415, |
| "step": 1209 |
| }, |
| { |
| "epoch": 7.07624633431085, |
| "grad_norm": 0.4346420863510563, |
| "learning_rate": 1.1289897459422756e-05, |
| "loss": 0.0536, |
| "mean_token_accuracy": 0.9831833392381668, |
| "step": 1210 |
| }, |
| { |
| "epoch": 7.0821114369501466, |
| "grad_norm": 0.5593461337456281, |
| "learning_rate": 1.126235513043418e-05, |
| "loss": 0.0642, |
| "mean_token_accuracy": 0.9789699912071228, |
| "step": 1211 |
| }, |
| { |
| "epoch": 7.087976539589443, |
| "grad_norm": 0.43900643121666977, |
| "learning_rate": 1.1234851774759828e-05, |
| "loss": 0.0524, |
| "mean_token_accuracy": 0.9849435314536095, |
| "step": 1212 |
| }, |
| { |
| "epoch": 7.093841642228739, |
| "grad_norm": 0.34888430248322444, |
| "learning_rate": 1.1207387492225772e-05, |
| "loss": 0.0545, |
| "mean_token_accuracy": 0.9824554324150085, |
| "step": 1213 |
| }, |
| { |
| "epoch": 7.099706744868035, |
| "grad_norm": 0.3819215648450502, |
| "learning_rate": 1.1179962382516268e-05, |
| "loss": 0.0648, |
| "mean_token_accuracy": 0.9815644100308418, |
| "step": 1214 |
| }, |
| { |
| "epoch": 7.105571847507331, |
| "grad_norm": 0.4018294483259087, |
| "learning_rate": 1.1152576545173388e-05, |
| "loss": 0.0559, |
| "mean_token_accuracy": 0.9836894050240517, |
| "step": 1215 |
| }, |
| { |
| "epoch": 7.1114369501466275, |
| "grad_norm": 0.3447430653572727, |
| "learning_rate": 1.1125230079596654e-05, |
| "loss": 0.0511, |
| "mean_token_accuracy": 0.9840358719229698, |
| "step": 1216 |
| }, |
| { |
| "epoch": 7.117302052785924, |
| "grad_norm": 0.4191025482663613, |
| "learning_rate": 1.10979230850427e-05, |
| "loss": 0.0551, |
| "mean_token_accuracy": 0.9832866340875626, |
| "step": 1217 |
| }, |
| { |
| "epoch": 7.12316715542522, |
| "grad_norm": 0.37400953837380885, |
| "learning_rate": 1.1070655660624876e-05, |
| "loss": 0.0607, |
| "mean_token_accuracy": 0.980600893497467, |
| "step": 1218 |
| }, |
| { |
| "epoch": 7.129032258064516, |
| "grad_norm": 0.37485779498534494, |
| "learning_rate": 1.1043427905312933e-05, |
| "loss": 0.0632, |
| "mean_token_accuracy": 0.9809056371450424, |
| "step": 1219 |
| }, |
| { |
| "epoch": 7.134897360703812, |
| "grad_norm": 0.37279266122405813, |
| "learning_rate": 1.1016239917932618e-05, |
| "loss": 0.0572, |
| "mean_token_accuracy": 0.9826710894703865, |
| "step": 1220 |
| }, |
| { |
| "epoch": 7.140762463343108, |
| "grad_norm": 0.3877743126318198, |
| "learning_rate": 1.098909179716535e-05, |
| "loss": 0.0617, |
| "mean_token_accuracy": 0.9800911992788315, |
| "step": 1221 |
| }, |
| { |
| "epoch": 7.146627565982405, |
| "grad_norm": 0.48036986394012565, |
| "learning_rate": 1.096198364154784e-05, |
| "loss": 0.0602, |
| "mean_token_accuracy": 0.9828939959406853, |
| "step": 1222 |
| }, |
| { |
| "epoch": 7.152492668621701, |
| "grad_norm": 0.3866461897489377, |
| "learning_rate": 1.0934915549471747e-05, |
| "loss": 0.0526, |
| "mean_token_accuracy": 0.9831520467996597, |
| "step": 1223 |
| }, |
| { |
| "epoch": 7.158357771260997, |
| "grad_norm": 0.3775525332017602, |
| "learning_rate": 1.0907887619183308e-05, |
| "loss": 0.053, |
| "mean_token_accuracy": 0.9841544181108475, |
| "step": 1224 |
| }, |
| { |
| "epoch": 7.164222873900293, |
| "grad_norm": 0.3836873587260523, |
| "learning_rate": 1.0880899948783002e-05, |
| "loss": 0.0592, |
| "mean_token_accuracy": 0.9817483797669411, |
| "step": 1225 |
| }, |
| { |
| "epoch": 7.170087976539589, |
| "grad_norm": 0.41100260358880575, |
| "learning_rate": 1.0853952636225165e-05, |
| "loss": 0.0613, |
| "mean_token_accuracy": 0.9828031435608864, |
| "step": 1226 |
| }, |
| { |
| "epoch": 7.1759530791788855, |
| "grad_norm": 0.4219853989912052, |
| "learning_rate": 1.0827045779317662e-05, |
| "loss": 0.0543, |
| "mean_token_accuracy": 0.9837125688791275, |
| "step": 1227 |
| }, |
| { |
| "epoch": 7.181818181818182, |
| "grad_norm": 0.43702873517895335, |
| "learning_rate": 1.080017947572152e-05, |
| "loss": 0.0543, |
| "mean_token_accuracy": 0.9834098666906357, |
| "step": 1228 |
| }, |
| { |
| "epoch": 7.187683284457478, |
| "grad_norm": 0.41016359666667, |
| "learning_rate": 1.0773353822950563e-05, |
| "loss": 0.0609, |
| "mean_token_accuracy": 0.9809712171554565, |
| "step": 1229 |
| }, |
| { |
| "epoch": 7.193548387096774, |
| "grad_norm": 0.49383511140721953, |
| "learning_rate": 1.074656891837108e-05, |
| "loss": 0.0515, |
| "mean_token_accuracy": 0.9839422553777695, |
| "step": 1230 |
| }, |
| { |
| "epoch": 7.19941348973607, |
| "grad_norm": 0.3956069878697063, |
| "learning_rate": 1.0719824859201457e-05, |
| "loss": 0.058, |
| "mean_token_accuracy": 0.9819058403372765, |
| "step": 1231 |
| }, |
| { |
| "epoch": 7.205278592375366, |
| "grad_norm": 0.4792058762225757, |
| "learning_rate": 1.0693121742511828e-05, |
| "loss": 0.0668, |
| "mean_token_accuracy": 0.9765582084655762, |
| "step": 1232 |
| }, |
| { |
| "epoch": 7.211143695014663, |
| "grad_norm": 0.4338995192160771, |
| "learning_rate": 1.0666459665223718e-05, |
| "loss": 0.0593, |
| "mean_token_accuracy": 0.981715977191925, |
| "step": 1233 |
| }, |
| { |
| "epoch": 7.217008797653959, |
| "grad_norm": 0.3855492769098808, |
| "learning_rate": 1.0639838724109708e-05, |
| "loss": 0.0575, |
| "mean_token_accuracy": 0.9826395660638809, |
| "step": 1234 |
| }, |
| { |
| "epoch": 7.222873900293255, |
| "grad_norm": 0.3437414453827899, |
| "learning_rate": 1.0613259015793056e-05, |
| "loss": 0.0506, |
| "mean_token_accuracy": 0.9838635697960854, |
| "step": 1235 |
| }, |
| { |
| "epoch": 7.228739002932551, |
| "grad_norm": 0.37946171096010817, |
| "learning_rate": 1.0586720636747368e-05, |
| "loss": 0.0612, |
| "mean_token_accuracy": 0.9806694537401199, |
| "step": 1236 |
| }, |
| { |
| "epoch": 7.234604105571847, |
| "grad_norm": 0.38714008229821795, |
| "learning_rate": 1.0560223683296244e-05, |
| "loss": 0.0565, |
| "mean_token_accuracy": 0.9813293144106865, |
| "step": 1237 |
| }, |
| { |
| "epoch": 7.2404692082111435, |
| "grad_norm": 0.40380335937441236, |
| "learning_rate": 1.0533768251612924e-05, |
| "loss": 0.0631, |
| "mean_token_accuracy": 0.9799295514822006, |
| "step": 1238 |
| }, |
| { |
| "epoch": 7.24633431085044, |
| "grad_norm": 0.4046737544797061, |
| "learning_rate": 1.0507354437719938e-05, |
| "loss": 0.0512, |
| "mean_token_accuracy": 0.9842623844742775, |
| "step": 1239 |
| }, |
| { |
| "epoch": 7.252199413489736, |
| "grad_norm": 0.3344455462825253, |
| "learning_rate": 1.0480982337488768e-05, |
| "loss": 0.0553, |
| "mean_token_accuracy": 0.9825280457735062, |
| "step": 1240 |
| }, |
| { |
| "epoch": 7.258064516129032, |
| "grad_norm": 0.40570311112116275, |
| "learning_rate": 1.0454652046639486e-05, |
| "loss": 0.0612, |
| "mean_token_accuracy": 0.9812857285141945, |
| "step": 1241 |
| }, |
| { |
| "epoch": 7.263929618768328, |
| "grad_norm": 0.47975992267665485, |
| "learning_rate": 1.0428363660740407e-05, |
| "loss": 0.0592, |
| "mean_token_accuracy": 0.9803736358880997, |
| "step": 1242 |
| }, |
| { |
| "epoch": 7.269794721407624, |
| "grad_norm": 0.4288916334704027, |
| "learning_rate": 1.0402117275207757e-05, |
| "loss": 0.0628, |
| "mean_token_accuracy": 0.980836883187294, |
| "step": 1243 |
| }, |
| { |
| "epoch": 7.275659824046921, |
| "grad_norm": 0.4048834204911636, |
| "learning_rate": 1.0375912985305319e-05, |
| "loss": 0.058, |
| "mean_token_accuracy": 0.9816715195775032, |
| "step": 1244 |
| }, |
| { |
| "epoch": 7.281524926686217, |
| "grad_norm": 0.33766750492275605, |
| "learning_rate": 1.0349750886144077e-05, |
| "loss": 0.0565, |
| "mean_token_accuracy": 0.9801446571946144, |
| "step": 1245 |
| }, |
| { |
| "epoch": 7.287390029325513, |
| "grad_norm": 0.3553392017238559, |
| "learning_rate": 1.0323631072681888e-05, |
| "loss": 0.0561, |
| "mean_token_accuracy": 0.982564315199852, |
| "step": 1246 |
| }, |
| { |
| "epoch": 7.293255131964809, |
| "grad_norm": 0.383982297271317, |
| "learning_rate": 1.0297553639723123e-05, |
| "loss": 0.0536, |
| "mean_token_accuracy": 0.9845903739333153, |
| "step": 1247 |
| }, |
| { |
| "epoch": 7.299120234604105, |
| "grad_norm": 0.4060031394599371, |
| "learning_rate": 1.027151868191834e-05, |
| "loss": 0.0609, |
| "mean_token_accuracy": 0.9797234684228897, |
| "step": 1248 |
| }, |
| { |
| "epoch": 7.3049853372434015, |
| "grad_norm": 0.42793813736937725, |
| "learning_rate": 1.0245526293763908e-05, |
| "loss": 0.0694, |
| "mean_token_accuracy": 0.9779497757554054, |
| "step": 1249 |
| }, |
| { |
| "epoch": 7.310850439882698, |
| "grad_norm": 0.4167380663869755, |
| "learning_rate": 1.0219576569601707e-05, |
| "loss": 0.0681, |
| "mean_token_accuracy": 0.9799009039998055, |
| "step": 1250 |
| }, |
| { |
| "epoch": 7.316715542521994, |
| "grad_norm": 0.4377973731399899, |
| "learning_rate": 1.0193669603618757e-05, |
| "loss": 0.0605, |
| "mean_token_accuracy": 0.9804759994149208, |
| "step": 1251 |
| }, |
| { |
| "epoch": 7.32258064516129, |
| "grad_norm": 0.313681809223758, |
| "learning_rate": 1.0167805489846873e-05, |
| "loss": 0.0548, |
| "mean_token_accuracy": 0.9833528101444244, |
| "step": 1252 |
| }, |
| { |
| "epoch": 7.328445747800586, |
| "grad_norm": 0.35044614156187415, |
| "learning_rate": 1.0141984322162353e-05, |
| "loss": 0.0569, |
| "mean_token_accuracy": 0.9819622039794922, |
| "step": 1253 |
| }, |
| { |
| "epoch": 7.334310850439882, |
| "grad_norm": 0.37793729635473716, |
| "learning_rate": 1.0116206194285598e-05, |
| "loss": 0.0604, |
| "mean_token_accuracy": 0.9819350242614746, |
| "step": 1254 |
| }, |
| { |
| "epoch": 7.340175953079179, |
| "grad_norm": 0.40558850664867346, |
| "learning_rate": 1.0090471199780812e-05, |
| "loss": 0.0635, |
| "mean_token_accuracy": 0.9799067080020905, |
| "step": 1255 |
| }, |
| { |
| "epoch": 7.346041055718475, |
| "grad_norm": 0.3360663531138135, |
| "learning_rate": 1.0064779432055616e-05, |
| "loss": 0.0586, |
| "mean_token_accuracy": 0.9792345017194748, |
| "step": 1256 |
| }, |
| { |
| "epoch": 7.351906158357771, |
| "grad_norm": 0.37918955936120036, |
| "learning_rate": 1.0039130984360761e-05, |
| "loss": 0.0584, |
| "mean_token_accuracy": 0.9800141528248787, |
| "step": 1257 |
| }, |
| { |
| "epoch": 7.357771260997067, |
| "grad_norm": 0.4157661176569054, |
| "learning_rate": 1.0013525949789745e-05, |
| "loss": 0.0593, |
| "mean_token_accuracy": 0.9802781492471695, |
| "step": 1258 |
| }, |
| { |
| "epoch": 7.363636363636363, |
| "grad_norm": 0.45087026261043445, |
| "learning_rate": 9.987964421278512e-06, |
| "loss": 0.0555, |
| "mean_token_accuracy": 0.9836331158876419, |
| "step": 1259 |
| }, |
| { |
| "epoch": 7.3695014662756595, |
| "grad_norm": 0.3804640426965221, |
| "learning_rate": 9.962446491605084e-06, |
| "loss": 0.0592, |
| "mean_token_accuracy": 0.978935495018959, |
| "step": 1260 |
| }, |
| { |
| "epoch": 7.375366568914956, |
| "grad_norm": 0.46616629391754066, |
| "learning_rate": 9.936972253389235e-06, |
| "loss": 0.0548, |
| "mean_token_accuracy": 0.9817307665944099, |
| "step": 1261 |
| }, |
| { |
| "epoch": 7.381231671554252, |
| "grad_norm": 0.44707097128486495, |
| "learning_rate": 9.911541799092162e-06, |
| "loss": 0.0618, |
| "mean_token_accuracy": 0.9799565002322197, |
| "step": 1262 |
| }, |
| { |
| "epoch": 7.387096774193548, |
| "grad_norm": 0.3101720172363353, |
| "learning_rate": 9.88615522101615e-06, |
| "loss": 0.0537, |
| "mean_token_accuracy": 0.9818929210305214, |
| "step": 1263 |
| }, |
| { |
| "epoch": 7.392961876832844, |
| "grad_norm": 0.3549119289206168, |
| "learning_rate": 9.860812611304225e-06, |
| "loss": 0.0531, |
| "mean_token_accuracy": 0.9823939129710197, |
| "step": 1264 |
| }, |
| { |
| "epoch": 7.39882697947214, |
| "grad_norm": 0.32942986436762367, |
| "learning_rate": 9.835514061939822e-06, |
| "loss": 0.0507, |
| "mean_token_accuracy": 0.9844275042414665, |
| "step": 1265 |
| }, |
| { |
| "epoch": 7.404692082111437, |
| "grad_norm": 0.36175422344149233, |
| "learning_rate": 9.810259664746454e-06, |
| "loss": 0.0576, |
| "mean_token_accuracy": 0.9809439033269882, |
| "step": 1266 |
| }, |
| { |
| "epoch": 7.410557184750733, |
| "grad_norm": 0.37478557515730054, |
| "learning_rate": 9.785049511387383e-06, |
| "loss": 0.0552, |
| "mean_token_accuracy": 0.9803262427449226, |
| "step": 1267 |
| }, |
| { |
| "epoch": 7.416422287390029, |
| "grad_norm": 0.44581561472846937, |
| "learning_rate": 9.759883693365287e-06, |
| "loss": 0.0642, |
| "mean_token_accuracy": 0.9802709370851517, |
| "step": 1268 |
| }, |
| { |
| "epoch": 7.422287390029325, |
| "grad_norm": 0.41500145999728155, |
| "learning_rate": 9.734762302021923e-06, |
| "loss": 0.0484, |
| "mean_token_accuracy": 0.9843206256628036, |
| "step": 1269 |
| }, |
| { |
| "epoch": 7.428152492668621, |
| "grad_norm": 0.3840956000039933, |
| "learning_rate": 9.709685428537794e-06, |
| "loss": 0.056, |
| "mean_token_accuracy": 0.9839591979980469, |
| "step": 1270 |
| }, |
| { |
| "epoch": 7.4340175953079175, |
| "grad_norm": 0.4723303673366163, |
| "learning_rate": 9.684653163931823e-06, |
| "loss": 0.063, |
| "mean_token_accuracy": 0.9816402345895767, |
| "step": 1271 |
| }, |
| { |
| "epoch": 7.439882697947214, |
| "grad_norm": 0.4899840550170524, |
| "learning_rate": 9.659665599061019e-06, |
| "loss": 0.068, |
| "mean_token_accuracy": 0.9774189367890358, |
| "step": 1272 |
| }, |
| { |
| "epoch": 7.44574780058651, |
| "grad_norm": 0.3857766522115467, |
| "learning_rate": 9.634722824620154e-06, |
| "loss": 0.0545, |
| "mean_token_accuracy": 0.9810511991381645, |
| "step": 1273 |
| }, |
| { |
| "epoch": 7.451612903225806, |
| "grad_norm": 0.3758751737714007, |
| "learning_rate": 9.609824931141423e-06, |
| "loss": 0.0548, |
| "mean_token_accuracy": 0.9822421818971634, |
| "step": 1274 |
| }, |
| { |
| "epoch": 7.457478005865102, |
| "grad_norm": 0.40199226438816, |
| "learning_rate": 9.584972008994123e-06, |
| "loss": 0.0565, |
| "mean_token_accuracy": 0.9830095544457436, |
| "step": 1275 |
| }, |
| { |
| "epoch": 7.463343108504398, |
| "grad_norm": 0.41133104233326856, |
| "learning_rate": 9.560164148384328e-06, |
| "loss": 0.0679, |
| "mean_token_accuracy": 0.9806720837950706, |
| "step": 1276 |
| }, |
| { |
| "epoch": 7.469208211143695, |
| "grad_norm": 0.3711841671942756, |
| "learning_rate": 9.53540143935455e-06, |
| "loss": 0.0603, |
| "mean_token_accuracy": 0.979490227997303, |
| "step": 1277 |
| }, |
| { |
| "epoch": 7.475073313782991, |
| "grad_norm": 0.4353724058654228, |
| "learning_rate": 9.510683971783425e-06, |
| "loss": 0.072, |
| "mean_token_accuracy": 0.9790943786501884, |
| "step": 1278 |
| }, |
| { |
| "epoch": 7.480938416422287, |
| "grad_norm": 0.3262040671207362, |
| "learning_rate": 9.486011835385372e-06, |
| "loss": 0.0432, |
| "mean_token_accuracy": 0.9866240471601486, |
| "step": 1279 |
| }, |
| { |
| "epoch": 7.486803519061583, |
| "grad_norm": 0.40084758967083595, |
| "learning_rate": 9.461385119710282e-06, |
| "loss": 0.0649, |
| "mean_token_accuracy": 0.9794245511293411, |
| "step": 1280 |
| }, |
| { |
| "epoch": 7.492668621700879, |
| "grad_norm": 0.37179990528957857, |
| "learning_rate": 9.436803914143189e-06, |
| "loss": 0.0649, |
| "mean_token_accuracy": 0.978649728000164, |
| "step": 1281 |
| }, |
| { |
| "epoch": 7.4985337243401755, |
| "grad_norm": 0.4144971947017167, |
| "learning_rate": 9.41226830790394e-06, |
| "loss": 0.0535, |
| "mean_token_accuracy": 0.9819266125559807, |
| "step": 1282 |
| }, |
| { |
| "epoch": 7.504398826979472, |
| "grad_norm": 0.41089012248038864, |
| "learning_rate": 9.387778390046881e-06, |
| "loss": 0.0567, |
| "mean_token_accuracy": 0.9813234284520149, |
| "step": 1283 |
| }, |
| { |
| "epoch": 7.510263929618768, |
| "grad_norm": 0.3299345875181496, |
| "learning_rate": 9.363334249460519e-06, |
| "loss": 0.0559, |
| "mean_token_accuracy": 0.9827776104211807, |
| "step": 1284 |
| }, |
| { |
| "epoch": 7.516129032258064, |
| "grad_norm": 0.3343562585006663, |
| "learning_rate": 9.338935974867213e-06, |
| "loss": 0.0558, |
| "mean_token_accuracy": 0.9819636717438698, |
| "step": 1285 |
| }, |
| { |
| "epoch": 7.52199413489736, |
| "grad_norm": 0.4643635750642076, |
| "learning_rate": 9.314583654822844e-06, |
| "loss": 0.0632, |
| "mean_token_accuracy": 0.9794372394680977, |
| "step": 1286 |
| }, |
| { |
| "epoch": 7.527859237536656, |
| "grad_norm": 0.4244359984781323, |
| "learning_rate": 9.290277377716503e-06, |
| "loss": 0.066, |
| "mean_token_accuracy": 0.9777902364730835, |
| "step": 1287 |
| }, |
| { |
| "epoch": 7.533724340175953, |
| "grad_norm": 0.3949368748032798, |
| "learning_rate": 9.266017231770155e-06, |
| "loss": 0.0524, |
| "mean_token_accuracy": 0.9815265461802483, |
| "step": 1288 |
| }, |
| { |
| "epoch": 7.539589442815249, |
| "grad_norm": 0.37548934551322455, |
| "learning_rate": 9.241803305038333e-06, |
| "loss": 0.062, |
| "mean_token_accuracy": 0.980858251452446, |
| "step": 1289 |
| }, |
| { |
| "epoch": 7.545454545454545, |
| "grad_norm": 0.38084864607631785, |
| "learning_rate": 9.217635685407813e-06, |
| "loss": 0.057, |
| "mean_token_accuracy": 0.9823618158698082, |
| "step": 1290 |
| }, |
| { |
| "epoch": 7.551319648093841, |
| "grad_norm": 0.3614666166755709, |
| "learning_rate": 9.19351446059729e-06, |
| "loss": 0.0509, |
| "mean_token_accuracy": 0.9841725453734398, |
| "step": 1291 |
| }, |
| { |
| "epoch": 7.557184750733137, |
| "grad_norm": 0.32334267634784053, |
| "learning_rate": 9.16943971815708e-06, |
| "loss": 0.0547, |
| "mean_token_accuracy": 0.9836216494441032, |
| "step": 1292 |
| }, |
| { |
| "epoch": 7.563049853372434, |
| "grad_norm": 0.3549020438357482, |
| "learning_rate": 9.145411545468756e-06, |
| "loss": 0.0537, |
| "mean_token_accuracy": 0.9822108149528503, |
| "step": 1293 |
| }, |
| { |
| "epoch": 7.568914956011731, |
| "grad_norm": 0.414896679893959, |
| "learning_rate": 9.121430029744893e-06, |
| "loss": 0.0551, |
| "mean_token_accuracy": 0.9838257804512978, |
| "step": 1294 |
| }, |
| { |
| "epoch": 7.574780058651027, |
| "grad_norm": 0.4664630449633319, |
| "learning_rate": 9.097495258028703e-06, |
| "loss": 0.0613, |
| "mean_token_accuracy": 0.9797477498650551, |
| "step": 1295 |
| }, |
| { |
| "epoch": 7.580645161290323, |
| "grad_norm": 0.3624415444639565, |
| "learning_rate": 9.073607317193742e-06, |
| "loss": 0.0513, |
| "mean_token_accuracy": 0.9819745272397995, |
| "step": 1296 |
| }, |
| { |
| "epoch": 7.586510263929619, |
| "grad_norm": 0.362922738638366, |
| "learning_rate": 9.049766293943589e-06, |
| "loss": 0.0573, |
| "mean_token_accuracy": 0.9807760417461395, |
| "step": 1297 |
| }, |
| { |
| "epoch": 7.592375366568915, |
| "grad_norm": 0.4243136296793758, |
| "learning_rate": 9.025972274811527e-06, |
| "loss": 0.0557, |
| "mean_token_accuracy": 0.9818306267261505, |
| "step": 1298 |
| }, |
| { |
| "epoch": 7.5982404692082115, |
| "grad_norm": 0.4425658209939251, |
| "learning_rate": 9.002225346160238e-06, |
| "loss": 0.0577, |
| "mean_token_accuracy": 0.9808589443564415, |
| "step": 1299 |
| }, |
| { |
| "epoch": 7.604105571847508, |
| "grad_norm": 0.3514744376386846, |
| "learning_rate": 8.97852559418148e-06, |
| "loss": 0.0531, |
| "mean_token_accuracy": 0.9827908128499985, |
| "step": 1300 |
| }, |
| { |
| "epoch": 7.609970674486804, |
| "grad_norm": 0.33745243601412556, |
| "learning_rate": 8.954873104895787e-06, |
| "loss": 0.0538, |
| "mean_token_accuracy": 0.9833011403679848, |
| "step": 1301 |
| }, |
| { |
| "epoch": 7.6158357771261, |
| "grad_norm": 0.3333868665559321, |
| "learning_rate": 8.931267964152132e-06, |
| "loss": 0.0573, |
| "mean_token_accuracy": 0.980949267745018, |
| "step": 1302 |
| }, |
| { |
| "epoch": 7.621700879765396, |
| "grad_norm": 0.39314271223090547, |
| "learning_rate": 8.907710257627651e-06, |
| "loss": 0.0574, |
| "mean_token_accuracy": 0.9824113622307777, |
| "step": 1303 |
| }, |
| { |
| "epoch": 7.627565982404692, |
| "grad_norm": 0.3388898421743972, |
| "learning_rate": 8.884200070827303e-06, |
| "loss": 0.0554, |
| "mean_token_accuracy": 0.9804334491491318, |
| "step": 1304 |
| }, |
| { |
| "epoch": 7.633431085043989, |
| "grad_norm": 0.37499471768779674, |
| "learning_rate": 8.86073748908357e-06, |
| "loss": 0.0552, |
| "mean_token_accuracy": 0.981963500380516, |
| "step": 1305 |
| }, |
| { |
| "epoch": 7.639296187683285, |
| "grad_norm": 0.38935220042170704, |
| "learning_rate": 8.837322597556146e-06, |
| "loss": 0.0546, |
| "mean_token_accuracy": 0.9819600731134415, |
| "step": 1306 |
| }, |
| { |
| "epoch": 7.645161290322581, |
| "grad_norm": 0.4422890162616974, |
| "learning_rate": 8.813955481231633e-06, |
| "loss": 0.0642, |
| "mean_token_accuracy": 0.9796778559684753, |
| "step": 1307 |
| }, |
| { |
| "epoch": 7.651026392961877, |
| "grad_norm": 0.35255417861474725, |
| "learning_rate": 8.790636224923221e-06, |
| "loss": 0.058, |
| "mean_token_accuracy": 0.9814234897494316, |
| "step": 1308 |
| }, |
| { |
| "epoch": 7.656891495601173, |
| "grad_norm": 0.39985957065533995, |
| "learning_rate": 8.767364913270399e-06, |
| "loss": 0.0592, |
| "mean_token_accuracy": 0.981181763112545, |
| "step": 1309 |
| }, |
| { |
| "epoch": 7.6627565982404695, |
| "grad_norm": 0.4134732505479203, |
| "learning_rate": 8.744141630738624e-06, |
| "loss": 0.0681, |
| "mean_token_accuracy": 0.9794332608580589, |
| "step": 1310 |
| }, |
| { |
| "epoch": 7.668621700879766, |
| "grad_norm": 0.4356893955326623, |
| "learning_rate": 8.720966461619038e-06, |
| "loss": 0.0617, |
| "mean_token_accuracy": 0.9818313270807266, |
| "step": 1311 |
| }, |
| { |
| "epoch": 7.674486803519062, |
| "grad_norm": 0.3989989585543621, |
| "learning_rate": 8.69783949002814e-06, |
| "loss": 0.054, |
| "mean_token_accuracy": 0.9826485440135002, |
| "step": 1312 |
| }, |
| { |
| "epoch": 7.680351906158358, |
| "grad_norm": 0.3359307886658749, |
| "learning_rate": 8.6747607999075e-06, |
| "loss": 0.0478, |
| "mean_token_accuracy": 0.9843815788626671, |
| "step": 1313 |
| }, |
| { |
| "epoch": 7.686217008797654, |
| "grad_norm": 0.3668758276797067, |
| "learning_rate": 8.651730475023435e-06, |
| "loss": 0.0621, |
| "mean_token_accuracy": 0.9808582216501236, |
| "step": 1314 |
| }, |
| { |
| "epoch": 7.69208211143695, |
| "grad_norm": 0.3993334587891021, |
| "learning_rate": 8.628748598966739e-06, |
| "loss": 0.0564, |
| "mean_token_accuracy": 0.9802764654159546, |
| "step": 1315 |
| }, |
| { |
| "epoch": 7.697947214076247, |
| "grad_norm": 0.4545815190595137, |
| "learning_rate": 8.605815255152323e-06, |
| "loss": 0.0635, |
| "mean_token_accuracy": 0.9794041439890862, |
| "step": 1316 |
| }, |
| { |
| "epoch": 7.703812316715543, |
| "grad_norm": 0.4487036384939457, |
| "learning_rate": 8.582930526818973e-06, |
| "loss": 0.0621, |
| "mean_token_accuracy": 0.9806603714823723, |
| "step": 1317 |
| }, |
| { |
| "epoch": 7.709677419354839, |
| "grad_norm": 0.4666847229368787, |
| "learning_rate": 8.560094497029008e-06, |
| "loss": 0.0586, |
| "mean_token_accuracy": 0.9819168671965599, |
| "step": 1318 |
| }, |
| { |
| "epoch": 7.715542521994135, |
| "grad_norm": 0.40195668204742174, |
| "learning_rate": 8.537307248667992e-06, |
| "loss": 0.0545, |
| "mean_token_accuracy": 0.9826847463846207, |
| "step": 1319 |
| }, |
| { |
| "epoch": 7.721407624633431, |
| "grad_norm": 0.3985988940480605, |
| "learning_rate": 8.514568864444432e-06, |
| "loss": 0.0659, |
| "mean_token_accuracy": 0.9786344021558762, |
| "step": 1320 |
| }, |
| { |
| "epoch": 7.7272727272727275, |
| "grad_norm": 0.3803942159665741, |
| "learning_rate": 8.491879426889483e-06, |
| "loss": 0.0524, |
| "mean_token_accuracy": 0.9822396486997604, |
| "step": 1321 |
| }, |
| { |
| "epoch": 7.733137829912024, |
| "grad_norm": 0.4314813792648232, |
| "learning_rate": 8.469239018356636e-06, |
| "loss": 0.0669, |
| "mean_token_accuracy": 0.9790932461619377, |
| "step": 1322 |
| }, |
| { |
| "epoch": 7.73900293255132, |
| "grad_norm": 0.4256794549163884, |
| "learning_rate": 8.446647721021435e-06, |
| "loss": 0.0704, |
| "mean_token_accuracy": 0.9789668694138527, |
| "step": 1323 |
| }, |
| { |
| "epoch": 7.744868035190616, |
| "grad_norm": 0.4015989362398405, |
| "learning_rate": 8.424105616881161e-06, |
| "loss": 0.0559, |
| "mean_token_accuracy": 0.9835130125284195, |
| "step": 1324 |
| }, |
| { |
| "epoch": 7.750733137829912, |
| "grad_norm": 0.3748051299880915, |
| "learning_rate": 8.40161278775455e-06, |
| "loss": 0.059, |
| "mean_token_accuracy": 0.9828163757920265, |
| "step": 1325 |
| }, |
| { |
| "epoch": 7.756598240469208, |
| "grad_norm": 0.4175935071524493, |
| "learning_rate": 8.379169315281485e-06, |
| "loss": 0.0638, |
| "mean_token_accuracy": 0.9799411669373512, |
| "step": 1326 |
| }, |
| { |
| "epoch": 7.762463343108505, |
| "grad_norm": 0.38437401738180915, |
| "learning_rate": 8.356775280922708e-06, |
| "loss": 0.065, |
| "mean_token_accuracy": 0.9807204306125641, |
| "step": 1327 |
| }, |
| { |
| "epoch": 7.768328445747801, |
| "grad_norm": 0.3427453782640103, |
| "learning_rate": 8.334430765959522e-06, |
| "loss": 0.0617, |
| "mean_token_accuracy": 0.97793348133564, |
| "step": 1328 |
| }, |
| { |
| "epoch": 7.774193548387097, |
| "grad_norm": 0.36726795137742246, |
| "learning_rate": 8.312135851493494e-06, |
| "loss": 0.0625, |
| "mean_token_accuracy": 0.9796304106712341, |
| "step": 1329 |
| }, |
| { |
| "epoch": 7.780058651026393, |
| "grad_norm": 0.33690389206076704, |
| "learning_rate": 8.28989061844615e-06, |
| "loss": 0.0493, |
| "mean_token_accuracy": 0.9848483875393867, |
| "step": 1330 |
| }, |
| { |
| "epoch": 7.785923753665689, |
| "grad_norm": 0.3730528248631496, |
| "learning_rate": 8.267695147558705e-06, |
| "loss": 0.0664, |
| "mean_token_accuracy": 0.9790797233581543, |
| "step": 1331 |
| }, |
| { |
| "epoch": 7.7917888563049855, |
| "grad_norm": 0.4445863650560794, |
| "learning_rate": 8.245549519391758e-06, |
| "loss": 0.0618, |
| "mean_token_accuracy": 0.9811621233820915, |
| "step": 1332 |
| }, |
| { |
| "epoch": 7.797653958944282, |
| "grad_norm": 0.3886988477025664, |
| "learning_rate": 8.22345381432499e-06, |
| "loss": 0.0612, |
| "mean_token_accuracy": 0.9816948473453522, |
| "step": 1333 |
| }, |
| { |
| "epoch": 7.803519061583578, |
| "grad_norm": 0.34021133386988633, |
| "learning_rate": 8.201408112556893e-06, |
| "loss": 0.0573, |
| "mean_token_accuracy": 0.9828803986310959, |
| "step": 1334 |
| }, |
| { |
| "epoch": 7.809384164222874, |
| "grad_norm": 0.3722898209413316, |
| "learning_rate": 8.179412494104457e-06, |
| "loss": 0.059, |
| "mean_token_accuracy": 0.9795228019356728, |
| "step": 1335 |
| }, |
| { |
| "epoch": 7.81524926686217, |
| "grad_norm": 0.3530389348112938, |
| "learning_rate": 8.15746703880289e-06, |
| "loss": 0.058, |
| "mean_token_accuracy": 0.981398917734623, |
| "step": 1336 |
| }, |
| { |
| "epoch": 7.821114369501466, |
| "grad_norm": 0.35299669138074313, |
| "learning_rate": 8.135571826305339e-06, |
| "loss": 0.0524, |
| "mean_token_accuracy": 0.9821040034294128, |
| "step": 1337 |
| }, |
| { |
| "epoch": 7.826979472140763, |
| "grad_norm": 0.3591917463620187, |
| "learning_rate": 8.113726936082576e-06, |
| "loss": 0.0655, |
| "mean_token_accuracy": 0.9790498167276382, |
| "step": 1338 |
| }, |
| { |
| "epoch": 7.832844574780059, |
| "grad_norm": 0.5220426396663527, |
| "learning_rate": 8.091932447422737e-06, |
| "loss": 0.0617, |
| "mean_token_accuracy": 0.978603184223175, |
| "step": 1339 |
| }, |
| { |
| "epoch": 7.838709677419355, |
| "grad_norm": 0.34756347572701596, |
| "learning_rate": 8.070188439431005e-06, |
| "loss": 0.0575, |
| "mean_token_accuracy": 0.9806881099939346, |
| "step": 1340 |
| }, |
| { |
| "epoch": 7.844574780058651, |
| "grad_norm": 0.41402266812479505, |
| "learning_rate": 8.048494991029352e-06, |
| "loss": 0.052, |
| "mean_token_accuracy": 0.9825706034898758, |
| "step": 1341 |
| }, |
| { |
| "epoch": 7.850439882697947, |
| "grad_norm": 0.34986065635759345, |
| "learning_rate": 8.02685218095624e-06, |
| "loss": 0.0576, |
| "mean_token_accuracy": 0.9828275814652443, |
| "step": 1342 |
| }, |
| { |
| "epoch": 7.8563049853372435, |
| "grad_norm": 0.35008434025300506, |
| "learning_rate": 8.005260087766318e-06, |
| "loss": 0.0619, |
| "mean_token_accuracy": 0.9789273515343666, |
| "step": 1343 |
| }, |
| { |
| "epoch": 7.86217008797654, |
| "grad_norm": 0.3628035072463874, |
| "learning_rate": 7.983718789830167e-06, |
| "loss": 0.061, |
| "mean_token_accuracy": 0.9795840755105019, |
| "step": 1344 |
| }, |
| { |
| "epoch": 7.868035190615836, |
| "grad_norm": 0.4052159012085872, |
| "learning_rate": 7.962228365333999e-06, |
| "loss": 0.064, |
| "mean_token_accuracy": 0.9813359454274178, |
| "step": 1345 |
| }, |
| { |
| "epoch": 7.873900293255132, |
| "grad_norm": 0.3695739612140032, |
| "learning_rate": 7.940788892279375e-06, |
| "loss": 0.0616, |
| "mean_token_accuracy": 0.9815887585282326, |
| "step": 1346 |
| }, |
| { |
| "epoch": 7.879765395894428, |
| "grad_norm": 0.36628043012480255, |
| "learning_rate": 7.919400448482928e-06, |
| "loss": 0.0577, |
| "mean_token_accuracy": 0.9804845973849297, |
| "step": 1347 |
| }, |
| { |
| "epoch": 7.885630498533724, |
| "grad_norm": 0.3773279787626722, |
| "learning_rate": 7.898063111576066e-06, |
| "loss": 0.06, |
| "mean_token_accuracy": 0.9801003411412239, |
| "step": 1348 |
| }, |
| { |
| "epoch": 7.891495601173021, |
| "grad_norm": 0.387347408588769, |
| "learning_rate": 7.876776959004706e-06, |
| "loss": 0.0705, |
| "mean_token_accuracy": 0.976474940776825, |
| "step": 1349 |
| }, |
| { |
| "epoch": 7.897360703812317, |
| "grad_norm": 0.3600666347332053, |
| "learning_rate": 7.855542068028981e-06, |
| "loss": 0.0525, |
| "mean_token_accuracy": 0.9814046397805214, |
| "step": 1350 |
| }, |
| { |
| "epoch": 7.903225806451613, |
| "grad_norm": 0.3168307759527497, |
| "learning_rate": 7.834358515722977e-06, |
| "loss": 0.0558, |
| "mean_token_accuracy": 0.9831864088773727, |
| "step": 1351 |
| }, |
| { |
| "epoch": 7.909090909090909, |
| "grad_norm": 0.34381906155090736, |
| "learning_rate": 7.813226378974427e-06, |
| "loss": 0.0603, |
| "mean_token_accuracy": 0.9802919253706932, |
| "step": 1352 |
| }, |
| { |
| "epoch": 7.914956011730205, |
| "grad_norm": 0.38700412978550913, |
| "learning_rate": 7.792145734484455e-06, |
| "loss": 0.0575, |
| "mean_token_accuracy": 0.9791939035058022, |
| "step": 1353 |
| }, |
| { |
| "epoch": 7.9208211143695015, |
| "grad_norm": 0.3526459336872874, |
| "learning_rate": 7.771116658767286e-06, |
| "loss": 0.0627, |
| "mean_token_accuracy": 0.9802243933081627, |
| "step": 1354 |
| }, |
| { |
| "epoch": 7.926686217008798, |
| "grad_norm": 0.399072301995629, |
| "learning_rate": 7.750139228149978e-06, |
| "loss": 0.0658, |
| "mean_token_accuracy": 0.9786151126027107, |
| "step": 1355 |
| }, |
| { |
| "epoch": 7.932551319648094, |
| "grad_norm": 0.44580675695069405, |
| "learning_rate": 7.729213518772121e-06, |
| "loss": 0.0558, |
| "mean_token_accuracy": 0.9831471219658852, |
| "step": 1356 |
| }, |
| { |
| "epoch": 7.93841642228739, |
| "grad_norm": 0.38551496553122244, |
| "learning_rate": 7.708339606585591e-06, |
| "loss": 0.0603, |
| "mean_token_accuracy": 0.9787757843732834, |
| "step": 1357 |
| }, |
| { |
| "epoch": 7.944281524926686, |
| "grad_norm": 0.3707055933975031, |
| "learning_rate": 7.687517567354266e-06, |
| "loss": 0.0679, |
| "mean_token_accuracy": 0.9794500693678856, |
| "step": 1358 |
| }, |
| { |
| "epoch": 7.9501466275659824, |
| "grad_norm": 0.3770154187455032, |
| "learning_rate": 7.66674747665373e-06, |
| "loss": 0.0533, |
| "mean_token_accuracy": 0.981170766055584, |
| "step": 1359 |
| }, |
| { |
| "epoch": 7.956011730205279, |
| "grad_norm": 0.3490341048905599, |
| "learning_rate": 7.646029409871029e-06, |
| "loss": 0.0596, |
| "mean_token_accuracy": 0.9797269105911255, |
| "step": 1360 |
| }, |
| { |
| "epoch": 7.961876832844575, |
| "grad_norm": 0.3213083693973658, |
| "learning_rate": 7.625363442204379e-06, |
| "loss": 0.0485, |
| "mean_token_accuracy": 0.9845825582742691, |
| "step": 1361 |
| }, |
| { |
| "epoch": 7.967741935483871, |
| "grad_norm": 0.31915132992627365, |
| "learning_rate": 7.604749648662892e-06, |
| "loss": 0.0568, |
| "mean_token_accuracy": 0.9816582277417183, |
| "step": 1362 |
| }, |
| { |
| "epoch": 7.973607038123167, |
| "grad_norm": 0.41843021589666207, |
| "learning_rate": 7.584188104066317e-06, |
| "loss": 0.0526, |
| "mean_token_accuracy": 0.9819516390562057, |
| "step": 1363 |
| }, |
| { |
| "epoch": 7.979472140762463, |
| "grad_norm": 0.36578962992205144, |
| "learning_rate": 7.563678883044754e-06, |
| "loss": 0.0669, |
| "mean_token_accuracy": 0.9804994836449623, |
| "step": 1364 |
| }, |
| { |
| "epoch": 7.9853372434017595, |
| "grad_norm": 0.37137732144693464, |
| "learning_rate": 7.5432220600383935e-06, |
| "loss": 0.0677, |
| "mean_token_accuracy": 0.978873997926712, |
| "step": 1365 |
| }, |
| { |
| "epoch": 7.991202346041056, |
| "grad_norm": 0.32129501323857224, |
| "learning_rate": 7.522817709297241e-06, |
| "loss": 0.0577, |
| "mean_token_accuracy": 0.9820384383201599, |
| "step": 1366 |
| }, |
| { |
| "epoch": 7.997067448680352, |
| "grad_norm": 0.3778910440397723, |
| "learning_rate": 7.502465904880849e-06, |
| "loss": 0.0592, |
| "mean_token_accuracy": 0.9810664132237434, |
| "step": 1367 |
| }, |
| { |
| "epoch": 8.0, |
| "grad_norm": 0.5872724133391657, |
| "learning_rate": 7.482166720658046e-06, |
| "loss": 0.0557, |
| "mean_token_accuracy": 0.9852104634046555, |
| "step": 1368 |
| }, |
| { |
| "epoch": 8.005865102639296, |
| "grad_norm": 0.3580615397900473, |
| "learning_rate": 7.461920230306674e-06, |
| "loss": 0.0573, |
| "mean_token_accuracy": 0.9798811301589012, |
| "step": 1369 |
| }, |
| { |
| "epoch": 8.011730205278592, |
| "grad_norm": 0.30243346768551127, |
| "learning_rate": 7.441726507313318e-06, |
| "loss": 0.0495, |
| "mean_token_accuracy": 0.9828371107578278, |
| "step": 1370 |
| }, |
| { |
| "epoch": 8.017595307917889, |
| "grad_norm": 0.3074075490353524, |
| "learning_rate": 7.421585624973033e-06, |
| "loss": 0.0568, |
| "mean_token_accuracy": 0.9812061563134193, |
| "step": 1371 |
| }, |
| { |
| "epoch": 8.023460410557185, |
| "grad_norm": 0.2869959547346391, |
| "learning_rate": 7.4014976563890915e-06, |
| "loss": 0.0482, |
| "mean_token_accuracy": 0.9833677485585213, |
| "step": 1372 |
| }, |
| { |
| "epoch": 8.029325513196481, |
| "grad_norm": 0.3157738216475847, |
| "learning_rate": 7.381462674472702e-06, |
| "loss": 0.0493, |
| "mean_token_accuracy": 0.9853304252028465, |
| "step": 1373 |
| }, |
| { |
| "epoch": 8.035190615835777, |
| "grad_norm": 0.2917261306391619, |
| "learning_rate": 7.36148075194276e-06, |
| "loss": 0.0495, |
| "mean_token_accuracy": 0.9836459308862686, |
| "step": 1374 |
| }, |
| { |
| "epoch": 8.041055718475073, |
| "grad_norm": 0.31842109310012173, |
| "learning_rate": 7.341551961325574e-06, |
| "loss": 0.0507, |
| "mean_token_accuracy": 0.9838737025856972, |
| "step": 1375 |
| }, |
| { |
| "epoch": 8.04692082111437, |
| "grad_norm": 0.30617736349863633, |
| "learning_rate": 7.3216763749546025e-06, |
| "loss": 0.0464, |
| "mean_token_accuracy": 0.9870840385556221, |
| "step": 1376 |
| }, |
| { |
| "epoch": 8.052785923753666, |
| "grad_norm": 0.3244368902054701, |
| "learning_rate": 7.301854064970202e-06, |
| "loss": 0.0527, |
| "mean_token_accuracy": 0.9844281673431396, |
| "step": 1377 |
| }, |
| { |
| "epoch": 8.058651026392962, |
| "grad_norm": 0.33740527739959525, |
| "learning_rate": 7.282085103319349e-06, |
| "loss": 0.0522, |
| "mean_token_accuracy": 0.9837864488363266, |
| "step": 1378 |
| }, |
| { |
| "epoch": 8.064516129032258, |
| "grad_norm": 0.35600976208331997, |
| "learning_rate": 7.2623695617553934e-06, |
| "loss": 0.0565, |
| "mean_token_accuracy": 0.9829199686646461, |
| "step": 1379 |
| }, |
| { |
| "epoch": 8.070381231671554, |
| "grad_norm": 0.34147147543660433, |
| "learning_rate": 7.242707511837781e-06, |
| "loss": 0.0476, |
| "mean_token_accuracy": 0.9852808564901352, |
| "step": 1380 |
| }, |
| { |
| "epoch": 8.07624633431085, |
| "grad_norm": 0.33417906439790784, |
| "learning_rate": 7.223099024931817e-06, |
| "loss": 0.0474, |
| "mean_token_accuracy": 0.9858130812644958, |
| "step": 1381 |
| }, |
| { |
| "epoch": 8.082111436950147, |
| "grad_norm": 0.39225200349168615, |
| "learning_rate": 7.203544172208387e-06, |
| "loss": 0.0522, |
| "mean_token_accuracy": 0.9837429746985435, |
| "step": 1382 |
| }, |
| { |
| "epoch": 8.087976539589443, |
| "grad_norm": 0.3559651753311173, |
| "learning_rate": 7.184043024643712e-06, |
| "loss": 0.054, |
| "mean_token_accuracy": 0.9832148253917694, |
| "step": 1383 |
| }, |
| { |
| "epoch": 8.093841642228739, |
| "grad_norm": 0.32846826889276376, |
| "learning_rate": 7.16459565301908e-06, |
| "loss": 0.0508, |
| "mean_token_accuracy": 0.9839031621813774, |
| "step": 1384 |
| }, |
| { |
| "epoch": 8.099706744868035, |
| "grad_norm": 0.36300700993630225, |
| "learning_rate": 7.145202127920598e-06, |
| "loss": 0.0595, |
| "mean_token_accuracy": 0.9801753610372543, |
| "step": 1385 |
| }, |
| { |
| "epoch": 8.105571847507331, |
| "grad_norm": 0.32115006218827374, |
| "learning_rate": 7.125862519738924e-06, |
| "loss": 0.0516, |
| "mean_token_accuracy": 0.9814363420009613, |
| "step": 1386 |
| }, |
| { |
| "epoch": 8.111436950146627, |
| "grad_norm": 0.3703822810157945, |
| "learning_rate": 7.106576898669031e-06, |
| "loss": 0.0551, |
| "mean_token_accuracy": 0.9836417734622955, |
| "step": 1387 |
| }, |
| { |
| "epoch": 8.117302052785924, |
| "grad_norm": 0.3596990409161751, |
| "learning_rate": 7.087345334709931e-06, |
| "loss": 0.0559, |
| "mean_token_accuracy": 0.9793681129813194, |
| "step": 1388 |
| }, |
| { |
| "epoch": 8.12316715542522, |
| "grad_norm": 0.439970724935907, |
| "learning_rate": 7.068167897664433e-06, |
| "loss": 0.0576, |
| "mean_token_accuracy": 0.9794286787509918, |
| "step": 1389 |
| }, |
| { |
| "epoch": 8.129032258064516, |
| "grad_norm": 0.34715662800736086, |
| "learning_rate": 7.0490446571388925e-06, |
| "loss": 0.0592, |
| "mean_token_accuracy": 0.982256643474102, |
| "step": 1390 |
| }, |
| { |
| "epoch": 8.134897360703812, |
| "grad_norm": 0.3397432205528309, |
| "learning_rate": 7.0299756825429465e-06, |
| "loss": 0.0507, |
| "mean_token_accuracy": 0.9833914712071419, |
| "step": 1391 |
| }, |
| { |
| "epoch": 8.140762463343108, |
| "grad_norm": 0.2717469828760133, |
| "learning_rate": 7.010961043089277e-06, |
| "loss": 0.0436, |
| "mean_token_accuracy": 0.9865109696984291, |
| "step": 1392 |
| }, |
| { |
| "epoch": 8.146627565982405, |
| "grad_norm": 0.3481112115021732, |
| "learning_rate": 6.992000807793333e-06, |
| "loss": 0.0492, |
| "mean_token_accuracy": 0.9850409254431725, |
| "step": 1393 |
| }, |
| { |
| "epoch": 8.1524926686217, |
| "grad_norm": 0.34563812676429884, |
| "learning_rate": 6.973095045473124e-06, |
| "loss": 0.0555, |
| "mean_token_accuracy": 0.982826754450798, |
| "step": 1394 |
| }, |
| { |
| "epoch": 8.158357771260997, |
| "grad_norm": 0.35087836917639226, |
| "learning_rate": 6.954243824748922e-06, |
| "loss": 0.0578, |
| "mean_token_accuracy": 0.9829492494463921, |
| "step": 1395 |
| }, |
| { |
| "epoch": 8.164222873900293, |
| "grad_norm": 0.2905320705631627, |
| "learning_rate": 6.93544721404305e-06, |
| "loss": 0.05, |
| "mean_token_accuracy": 0.9838858619332314, |
| "step": 1396 |
| }, |
| { |
| "epoch": 8.17008797653959, |
| "grad_norm": 0.3082826739718294, |
| "learning_rate": 6.916705281579612e-06, |
| "loss": 0.0519, |
| "mean_token_accuracy": 0.9831016063690186, |
| "step": 1397 |
| }, |
| { |
| "epoch": 8.175953079178885, |
| "grad_norm": 0.36435966366625105, |
| "learning_rate": 6.898018095384252e-06, |
| "loss": 0.0633, |
| "mean_token_accuracy": 0.9801773875951767, |
| "step": 1398 |
| }, |
| { |
| "epoch": 8.181818181818182, |
| "grad_norm": 0.3803019536472587, |
| "learning_rate": 6.879385723283913e-06, |
| "loss": 0.0521, |
| "mean_token_accuracy": 0.9834791570901871, |
| "step": 1399 |
| }, |
| { |
| "epoch": 8.187683284457478, |
| "grad_norm": 0.3532847547848805, |
| "learning_rate": 6.8608082329065775e-06, |
| "loss": 0.0532, |
| "mean_token_accuracy": 0.9834351092576981, |
| "step": 1400 |
| }, |
| { |
| "epoch": 8.193548387096774, |
| "grad_norm": 0.38911936414789794, |
| "learning_rate": 6.842285691681032e-06, |
| "loss": 0.0591, |
| "mean_token_accuracy": 0.9815716445446014, |
| "step": 1401 |
| }, |
| { |
| "epoch": 8.19941348973607, |
| "grad_norm": 0.3428278171357225, |
| "learning_rate": 6.8238181668366244e-06, |
| "loss": 0.0478, |
| "mean_token_accuracy": 0.9838706254959106, |
| "step": 1402 |
| }, |
| { |
| "epoch": 8.205278592375366, |
| "grad_norm": 0.33191960396480547, |
| "learning_rate": 6.805405725403006e-06, |
| "loss": 0.0572, |
| "mean_token_accuracy": 0.9818863347172737, |
| "step": 1403 |
| }, |
| { |
| "epoch": 8.211143695014663, |
| "grad_norm": 0.36925272182757807, |
| "learning_rate": 6.787048434209906e-06, |
| "loss": 0.0552, |
| "mean_token_accuracy": 0.9810345396399498, |
| "step": 1404 |
| }, |
| { |
| "epoch": 8.217008797653959, |
| "grad_norm": 0.382501882480973, |
| "learning_rate": 6.768746359886882e-06, |
| "loss": 0.0546, |
| "mean_token_accuracy": 0.9816075041890144, |
| "step": 1405 |
| }, |
| { |
| "epoch": 8.222873900293255, |
| "grad_norm": 0.3595201095049301, |
| "learning_rate": 6.750499568863061e-06, |
| "loss": 0.0556, |
| "mean_token_accuracy": 0.9821967929601669, |
| "step": 1406 |
| }, |
| { |
| "epoch": 8.228739002932551, |
| "grad_norm": 0.38016578185329586, |
| "learning_rate": 6.732308127366931e-06, |
| "loss": 0.0624, |
| "mean_token_accuracy": 0.9800859242677689, |
| "step": 1407 |
| }, |
| { |
| "epoch": 8.234604105571847, |
| "grad_norm": 0.38842597342794766, |
| "learning_rate": 6.714172101426077e-06, |
| "loss": 0.0582, |
| "mean_token_accuracy": 0.9795337095856667, |
| "step": 1408 |
| }, |
| { |
| "epoch": 8.240469208211143, |
| "grad_norm": 0.26062246060123856, |
| "learning_rate": 6.696091556866948e-06, |
| "loss": 0.0435, |
| "mean_token_accuracy": 0.9856739714741707, |
| "step": 1409 |
| }, |
| { |
| "epoch": 8.24633431085044, |
| "grad_norm": 0.37049498473441145, |
| "learning_rate": 6.678066559314622e-06, |
| "loss": 0.0606, |
| "mean_token_accuracy": 0.9812714830040932, |
| "step": 1410 |
| }, |
| { |
| "epoch": 8.252199413489736, |
| "grad_norm": 0.3989598961651288, |
| "learning_rate": 6.660097174192556e-06, |
| "loss": 0.056, |
| "mean_token_accuracy": 0.9807810261845589, |
| "step": 1411 |
| }, |
| { |
| "epoch": 8.258064516129032, |
| "grad_norm": 0.3749156284590479, |
| "learning_rate": 6.642183466722363e-06, |
| "loss": 0.0565, |
| "mean_token_accuracy": 0.9821376726031303, |
| "step": 1412 |
| }, |
| { |
| "epoch": 8.263929618768328, |
| "grad_norm": 0.35702668344077504, |
| "learning_rate": 6.624325501923565e-06, |
| "loss": 0.0563, |
| "mean_token_accuracy": 0.9820006415247917, |
| "step": 1413 |
| }, |
| { |
| "epoch": 8.269794721407624, |
| "grad_norm": 0.40663026140935876, |
| "learning_rate": 6.606523344613362e-06, |
| "loss": 0.0597, |
| "mean_token_accuracy": 0.9796445891261101, |
| "step": 1414 |
| }, |
| { |
| "epoch": 8.27565982404692, |
| "grad_norm": 0.34723841867587746, |
| "learning_rate": 6.588777059406397e-06, |
| "loss": 0.0559, |
| "mean_token_accuracy": 0.9835962206125259, |
| "step": 1415 |
| }, |
| { |
| "epoch": 8.281524926686217, |
| "grad_norm": 0.312293892386182, |
| "learning_rate": 6.571086710714516e-06, |
| "loss": 0.0465, |
| "mean_token_accuracy": 0.9838526993989944, |
| "step": 1416 |
| }, |
| { |
| "epoch": 8.287390029325513, |
| "grad_norm": 0.3524237439107806, |
| "learning_rate": 6.553452362746543e-06, |
| "loss": 0.0585, |
| "mean_token_accuracy": 0.980175569653511, |
| "step": 1417 |
| }, |
| { |
| "epoch": 8.29325513196481, |
| "grad_norm": 0.39261505836168864, |
| "learning_rate": 6.5358740795080335e-06, |
| "loss": 0.0639, |
| "mean_token_accuracy": 0.9783350303769112, |
| "step": 1418 |
| }, |
| { |
| "epoch": 8.299120234604105, |
| "grad_norm": 0.4192403382994383, |
| "learning_rate": 6.518351924801061e-06, |
| "loss": 0.0619, |
| "mean_token_accuracy": 0.9807342141866684, |
| "step": 1419 |
| }, |
| { |
| "epoch": 8.304985337243401, |
| "grad_norm": 0.28497911177981083, |
| "learning_rate": 6.500885962223969e-06, |
| "loss": 0.0511, |
| "mean_token_accuracy": 0.9853582382202148, |
| "step": 1420 |
| }, |
| { |
| "epoch": 8.310850439882698, |
| "grad_norm": 0.3635257303812234, |
| "learning_rate": 6.483476255171146e-06, |
| "loss": 0.0613, |
| "mean_token_accuracy": 0.9806642904877663, |
| "step": 1421 |
| }, |
| { |
| "epoch": 8.316715542521994, |
| "grad_norm": 0.3284725590763597, |
| "learning_rate": 6.4661228668328015e-06, |
| "loss": 0.0504, |
| "mean_token_accuracy": 0.9835583493113518, |
| "step": 1422 |
| }, |
| { |
| "epoch": 8.32258064516129, |
| "grad_norm": 0.3809611371335918, |
| "learning_rate": 6.448825860194722e-06, |
| "loss": 0.0596, |
| "mean_token_accuracy": 0.9815945476293564, |
| "step": 1423 |
| }, |
| { |
| "epoch": 8.328445747800586, |
| "grad_norm": 0.26700828855473413, |
| "learning_rate": 6.431585298038057e-06, |
| "loss": 0.0416, |
| "mean_token_accuracy": 0.9862992838025093, |
| "step": 1424 |
| }, |
| { |
| "epoch": 8.334310850439882, |
| "grad_norm": 0.31427705205892353, |
| "learning_rate": 6.414401242939087e-06, |
| "loss": 0.0579, |
| "mean_token_accuracy": 0.9815279394388199, |
| "step": 1425 |
| }, |
| { |
| "epoch": 8.340175953079179, |
| "grad_norm": 0.36011252789263, |
| "learning_rate": 6.397273757268987e-06, |
| "loss": 0.0508, |
| "mean_token_accuracy": 0.9852896630764008, |
| "step": 1426 |
| }, |
| { |
| "epoch": 8.346041055718475, |
| "grad_norm": 0.3739186167424333, |
| "learning_rate": 6.380202903193616e-06, |
| "loss": 0.0626, |
| "mean_token_accuracy": 0.9816398844122887, |
| "step": 1427 |
| }, |
| { |
| "epoch": 8.351906158357771, |
| "grad_norm": 0.3711990215340053, |
| "learning_rate": 6.363188742673281e-06, |
| "loss": 0.0556, |
| "mean_token_accuracy": 0.9811735600233078, |
| "step": 1428 |
| }, |
| { |
| "epoch": 8.357771260997067, |
| "grad_norm": 0.33932075687055624, |
| "learning_rate": 6.346231337462513e-06, |
| "loss": 0.051, |
| "mean_token_accuracy": 0.9825574532151222, |
| "step": 1429 |
| }, |
| { |
| "epoch": 8.363636363636363, |
| "grad_norm": 0.44161637530546255, |
| "learning_rate": 6.329330749109839e-06, |
| "loss": 0.0667, |
| "mean_token_accuracy": 0.978769101202488, |
| "step": 1430 |
| }, |
| { |
| "epoch": 8.36950146627566, |
| "grad_norm": 0.346684374066376, |
| "learning_rate": 6.312487038957573e-06, |
| "loss": 0.0558, |
| "mean_token_accuracy": 0.9824666082859039, |
| "step": 1431 |
| }, |
| { |
| "epoch": 8.375366568914956, |
| "grad_norm": 0.32798515060125477, |
| "learning_rate": 6.295700268141579e-06, |
| "loss": 0.0479, |
| "mean_token_accuracy": 0.9828667864203453, |
| "step": 1432 |
| }, |
| { |
| "epoch": 8.381231671554252, |
| "grad_norm": 0.3050242296207711, |
| "learning_rate": 6.2789704975910574e-06, |
| "loss": 0.0472, |
| "mean_token_accuracy": 0.9842007234692574, |
| "step": 1433 |
| }, |
| { |
| "epoch": 8.387096774193548, |
| "grad_norm": 0.342916785749358, |
| "learning_rate": 6.262297788028316e-06, |
| "loss": 0.0489, |
| "mean_token_accuracy": 0.9815072119235992, |
| "step": 1434 |
| }, |
| { |
| "epoch": 8.392961876832844, |
| "grad_norm": 0.36151402107915315, |
| "learning_rate": 6.245682199968556e-06, |
| "loss": 0.0568, |
| "mean_token_accuracy": 0.9811645448207855, |
| "step": 1435 |
| }, |
| { |
| "epoch": 8.39882697947214, |
| "grad_norm": 0.33322534234356715, |
| "learning_rate": 6.229123793719656e-06, |
| "loss": 0.0532, |
| "mean_token_accuracy": 0.9831674918532372, |
| "step": 1436 |
| }, |
| { |
| "epoch": 8.404692082111437, |
| "grad_norm": 0.3283361999533351, |
| "learning_rate": 6.21262262938194e-06, |
| "loss": 0.0514, |
| "mean_token_accuracy": 0.9837821051478386, |
| "step": 1437 |
| }, |
| { |
| "epoch": 8.410557184750733, |
| "grad_norm": 0.3401690287440309, |
| "learning_rate": 6.196178766847969e-06, |
| "loss": 0.0534, |
| "mean_token_accuracy": 0.9818862527608871, |
| "step": 1438 |
| }, |
| { |
| "epoch": 8.416422287390029, |
| "grad_norm": 0.37528738834672287, |
| "learning_rate": 6.1797922658023264e-06, |
| "loss": 0.0635, |
| "mean_token_accuracy": 0.978727675974369, |
| "step": 1439 |
| }, |
| { |
| "epoch": 8.422287390029325, |
| "grad_norm": 0.3149172294095641, |
| "learning_rate": 6.16346318572139e-06, |
| "loss": 0.0541, |
| "mean_token_accuracy": 0.9831294119358063, |
| "step": 1440 |
| }, |
| { |
| "epoch": 8.428152492668621, |
| "grad_norm": 0.3802962509030587, |
| "learning_rate": 6.147191585873128e-06, |
| "loss": 0.0605, |
| "mean_token_accuracy": 0.981091320514679, |
| "step": 1441 |
| }, |
| { |
| "epoch": 8.434017595307918, |
| "grad_norm": 0.325742841513319, |
| "learning_rate": 6.130977525316878e-06, |
| "loss": 0.0567, |
| "mean_token_accuracy": 0.9818108677864075, |
| "step": 1442 |
| }, |
| { |
| "epoch": 8.439882697947214, |
| "grad_norm": 0.350918927222633, |
| "learning_rate": 6.114821062903125e-06, |
| "loss": 0.0559, |
| "mean_token_accuracy": 0.9823700860142708, |
| "step": 1443 |
| }, |
| { |
| "epoch": 8.44574780058651, |
| "grad_norm": 0.33888867438286, |
| "learning_rate": 6.098722257273303e-06, |
| "loss": 0.054, |
| "mean_token_accuracy": 0.980469599366188, |
| "step": 1444 |
| }, |
| { |
| "epoch": 8.451612903225806, |
| "grad_norm": 0.3986416341368361, |
| "learning_rate": 6.082681166859579e-06, |
| "loss": 0.0608, |
| "mean_token_accuracy": 0.9823858961462975, |
| "step": 1445 |
| }, |
| { |
| "epoch": 8.457478005865102, |
| "grad_norm": 0.3321438872721226, |
| "learning_rate": 6.066697849884629e-06, |
| "loss": 0.0588, |
| "mean_token_accuracy": 0.9816719517111778, |
| "step": 1446 |
| }, |
| { |
| "epoch": 8.463343108504398, |
| "grad_norm": 0.29731617560561274, |
| "learning_rate": 6.0507723643614415e-06, |
| "loss": 0.043, |
| "mean_token_accuracy": 0.9855259880423546, |
| "step": 1447 |
| }, |
| { |
| "epoch": 8.469208211143695, |
| "grad_norm": 0.3544585856707004, |
| "learning_rate": 6.034904768093095e-06, |
| "loss": 0.0564, |
| "mean_token_accuracy": 0.9806393161416054, |
| "step": 1448 |
| }, |
| { |
| "epoch": 8.47507331378299, |
| "grad_norm": 0.3566904543758243, |
| "learning_rate": 6.019095118672557e-06, |
| "loss": 0.0607, |
| "mean_token_accuracy": 0.9797836020588875, |
| "step": 1449 |
| }, |
| { |
| "epoch": 8.480938416422287, |
| "grad_norm": 0.4011506112780418, |
| "learning_rate": 6.003343473482469e-06, |
| "loss": 0.0561, |
| "mean_token_accuracy": 0.9821067750453949, |
| "step": 1450 |
| }, |
| { |
| "epoch": 8.486803519061583, |
| "grad_norm": 0.39239441137933195, |
| "learning_rate": 5.98764988969494e-06, |
| "loss": 0.059, |
| "mean_token_accuracy": 0.9805739000439644, |
| "step": 1451 |
| }, |
| { |
| "epoch": 8.49266862170088, |
| "grad_norm": 0.29869881818062005, |
| "learning_rate": 5.972014424271344e-06, |
| "loss": 0.0486, |
| "mean_token_accuracy": 0.9846675246953964, |
| "step": 1452 |
| }, |
| { |
| "epoch": 8.498533724340176, |
| "grad_norm": 0.32409437830897814, |
| "learning_rate": 5.956437133962103e-06, |
| "loss": 0.0545, |
| "mean_token_accuracy": 0.9832024946808815, |
| "step": 1453 |
| }, |
| { |
| "epoch": 8.504398826979472, |
| "grad_norm": 0.44350748801496986, |
| "learning_rate": 5.94091807530649e-06, |
| "loss": 0.0573, |
| "mean_token_accuracy": 0.9812595695257187, |
| "step": 1454 |
| }, |
| { |
| "epoch": 8.510263929618768, |
| "grad_norm": 0.375249797161884, |
| "learning_rate": 5.925457304632421e-06, |
| "loss": 0.0581, |
| "mean_token_accuracy": 0.9809895157814026, |
| "step": 1455 |
| }, |
| { |
| "epoch": 8.516129032258064, |
| "grad_norm": 0.3697637091968322, |
| "learning_rate": 5.91005487805625e-06, |
| "loss": 0.0626, |
| "mean_token_accuracy": 0.9802481904625893, |
| "step": 1456 |
| }, |
| { |
| "epoch": 8.52199413489736, |
| "grad_norm": 0.350870030605446, |
| "learning_rate": 5.894710851482563e-06, |
| "loss": 0.0547, |
| "mean_token_accuracy": 0.9832234531641006, |
| "step": 1457 |
| }, |
| { |
| "epoch": 8.527859237536656, |
| "grad_norm": 0.365055384156501, |
| "learning_rate": 5.879425280603981e-06, |
| "loss": 0.0576, |
| "mean_token_accuracy": 0.9823313876986504, |
| "step": 1458 |
| }, |
| { |
| "epoch": 8.533724340175953, |
| "grad_norm": 0.3566574779135655, |
| "learning_rate": 5.864198220900952e-06, |
| "loss": 0.0523, |
| "mean_token_accuracy": 0.9819063544273376, |
| "step": 1459 |
| }, |
| { |
| "epoch": 8.539589442815249, |
| "grad_norm": 0.36841885684673653, |
| "learning_rate": 5.849029727641552e-06, |
| "loss": 0.0551, |
| "mean_token_accuracy": 0.9813630729913712, |
| "step": 1460 |
| }, |
| { |
| "epoch": 8.545454545454545, |
| "grad_norm": 0.3518709497305404, |
| "learning_rate": 5.833919855881286e-06, |
| "loss": 0.0567, |
| "mean_token_accuracy": 0.9804489463567734, |
| "step": 1461 |
| }, |
| { |
| "epoch": 8.551319648093841, |
| "grad_norm": 0.3453349736449673, |
| "learning_rate": 5.818868660462886e-06, |
| "loss": 0.0518, |
| "mean_token_accuracy": 0.9826655164361, |
| "step": 1462 |
| }, |
| { |
| "epoch": 8.557184750733137, |
| "grad_norm": 0.3147211495267202, |
| "learning_rate": 5.803876196016114e-06, |
| "loss": 0.0525, |
| "mean_token_accuracy": 0.9852809086441994, |
| "step": 1463 |
| }, |
| { |
| "epoch": 8.563049853372434, |
| "grad_norm": 0.3292651610864462, |
| "learning_rate": 5.788942516957561e-06, |
| "loss": 0.0521, |
| "mean_token_accuracy": 0.9832091629505157, |
| "step": 1464 |
| }, |
| { |
| "epoch": 8.56891495601173, |
| "grad_norm": 0.4001132741069757, |
| "learning_rate": 5.774067677490448e-06, |
| "loss": 0.0609, |
| "mean_token_accuracy": 0.9808182790875435, |
| "step": 1465 |
| }, |
| { |
| "epoch": 8.574780058651026, |
| "grad_norm": 0.3394619974830389, |
| "learning_rate": 5.759251731604435e-06, |
| "loss": 0.0483, |
| "mean_token_accuracy": 0.9834600687026978, |
| "step": 1466 |
| }, |
| { |
| "epoch": 8.580645161290322, |
| "grad_norm": 0.3843990415825973, |
| "learning_rate": 5.744494733075424e-06, |
| "loss": 0.0569, |
| "mean_token_accuracy": 0.9812219887971878, |
| "step": 1467 |
| }, |
| { |
| "epoch": 8.586510263929618, |
| "grad_norm": 0.313015761345117, |
| "learning_rate": 5.729796735465359e-06, |
| "loss": 0.0556, |
| "mean_token_accuracy": 0.9805843010544777, |
| "step": 1468 |
| }, |
| { |
| "epoch": 8.592375366568914, |
| "grad_norm": 0.397873067980578, |
| "learning_rate": 5.7151577921220356e-06, |
| "loss": 0.0565, |
| "mean_token_accuracy": 0.980902798473835, |
| "step": 1469 |
| }, |
| { |
| "epoch": 8.59824046920821, |
| "grad_norm": 0.2775157728708668, |
| "learning_rate": 5.7005779561789046e-06, |
| "loss": 0.046, |
| "mean_token_accuracy": 0.9848226681351662, |
| "step": 1470 |
| }, |
| { |
| "epoch": 8.604105571847507, |
| "grad_norm": 0.32262469073810285, |
| "learning_rate": 5.686057280554882e-06, |
| "loss": 0.0508, |
| "mean_token_accuracy": 0.9834897667169571, |
| "step": 1471 |
| }, |
| { |
| "epoch": 8.609970674486803, |
| "grad_norm": 0.3381801962057254, |
| "learning_rate": 5.671595817954157e-06, |
| "loss": 0.0549, |
| "mean_token_accuracy": 0.9829668179154396, |
| "step": 1472 |
| }, |
| { |
| "epoch": 8.6158357771261, |
| "grad_norm": 0.4125328725756552, |
| "learning_rate": 5.657193620865997e-06, |
| "loss": 0.0539, |
| "mean_token_accuracy": 0.9830298721790314, |
| "step": 1473 |
| }, |
| { |
| "epoch": 8.621700879765395, |
| "grad_norm": 0.42397175137698584, |
| "learning_rate": 5.642850741564562e-06, |
| "loss": 0.0597, |
| "mean_token_accuracy": 0.981752060353756, |
| "step": 1474 |
| }, |
| { |
| "epoch": 8.627565982404692, |
| "grad_norm": 0.3678506175376002, |
| "learning_rate": 5.62856723210871e-06, |
| "loss": 0.0569, |
| "mean_token_accuracy": 0.9808289110660553, |
| "step": 1475 |
| }, |
| { |
| "epoch": 8.633431085043988, |
| "grad_norm": 0.38044929459613297, |
| "learning_rate": 5.614343144341814e-06, |
| "loss": 0.0579, |
| "mean_token_accuracy": 0.9798820838332176, |
| "step": 1476 |
| }, |
| { |
| "epoch": 8.639296187683284, |
| "grad_norm": 0.3173567806897864, |
| "learning_rate": 5.600178529891564e-06, |
| "loss": 0.0492, |
| "mean_token_accuracy": 0.9839482828974724, |
| "step": 1477 |
| }, |
| { |
| "epoch": 8.64516129032258, |
| "grad_norm": 0.39392861788504485, |
| "learning_rate": 5.58607344016979e-06, |
| "loss": 0.0637, |
| "mean_token_accuracy": 0.9784178957343102, |
| "step": 1478 |
| }, |
| { |
| "epoch": 8.651026392961876, |
| "grad_norm": 0.3357796568293705, |
| "learning_rate": 5.5720279263722795e-06, |
| "loss": 0.0528, |
| "mean_token_accuracy": 0.981262743473053, |
| "step": 1479 |
| }, |
| { |
| "epoch": 8.656891495601172, |
| "grad_norm": 0.3159822158252555, |
| "learning_rate": 5.558042039478564e-06, |
| "loss": 0.0514, |
| "mean_token_accuracy": 0.9824788197875023, |
| "step": 1480 |
| }, |
| { |
| "epoch": 8.662756598240469, |
| "grad_norm": 0.38617284468702967, |
| "learning_rate": 5.544115830251769e-06, |
| "loss": 0.0626, |
| "mean_token_accuracy": 0.9803685322403908, |
| "step": 1481 |
| }, |
| { |
| "epoch": 8.668621700879765, |
| "grad_norm": 0.33796336603448435, |
| "learning_rate": 5.530249349238407e-06, |
| "loss": 0.0559, |
| "mean_token_accuracy": 0.9826963320374489, |
| "step": 1482 |
| }, |
| { |
| "epoch": 8.674486803519061, |
| "grad_norm": 0.3953276485009274, |
| "learning_rate": 5.516442646768207e-06, |
| "loss": 0.0616, |
| "mean_token_accuracy": 0.9777121841907501, |
| "step": 1483 |
| }, |
| { |
| "epoch": 8.680351906158357, |
| "grad_norm": 0.3483816517700947, |
| "learning_rate": 5.502695772953922e-06, |
| "loss": 0.0615, |
| "mean_token_accuracy": 0.979569785296917, |
| "step": 1484 |
| }, |
| { |
| "epoch": 8.686217008797653, |
| "grad_norm": 0.3637996842940104, |
| "learning_rate": 5.489008777691151e-06, |
| "loss": 0.0549, |
| "mean_token_accuracy": 0.9833445623517036, |
| "step": 1485 |
| }, |
| { |
| "epoch": 8.69208211143695, |
| "grad_norm": 0.354900472955803, |
| "learning_rate": 5.475381710658161e-06, |
| "loss": 0.0557, |
| "mean_token_accuracy": 0.9816870614886284, |
| "step": 1486 |
| }, |
| { |
| "epoch": 8.697947214076246, |
| "grad_norm": 0.4715464896596745, |
| "learning_rate": 5.4618146213157e-06, |
| "loss": 0.064, |
| "mean_token_accuracy": 0.9779196679592133, |
| "step": 1487 |
| }, |
| { |
| "epoch": 8.703812316715542, |
| "grad_norm": 0.34964552430972173, |
| "learning_rate": 5.448307558906822e-06, |
| "loss": 0.059, |
| "mean_token_accuracy": 0.9810210913419724, |
| "step": 1488 |
| }, |
| { |
| "epoch": 8.709677419354838, |
| "grad_norm": 0.35199334637919955, |
| "learning_rate": 5.434860572456711e-06, |
| "loss": 0.0532, |
| "mean_token_accuracy": 0.9808976799249649, |
| "step": 1489 |
| }, |
| { |
| "epoch": 8.715542521994134, |
| "grad_norm": 0.34772290388116905, |
| "learning_rate": 5.421473710772496e-06, |
| "loss": 0.0574, |
| "mean_token_accuracy": 0.9827618896961212, |
| "step": 1490 |
| }, |
| { |
| "epoch": 8.72140762463343, |
| "grad_norm": 0.3152598232442663, |
| "learning_rate": 5.408147022443077e-06, |
| "loss": 0.0509, |
| "mean_token_accuracy": 0.9823957309126854, |
| "step": 1491 |
| }, |
| { |
| "epoch": 8.727272727272727, |
| "grad_norm": 0.3297027726384655, |
| "learning_rate": 5.39488055583895e-06, |
| "loss": 0.0581, |
| "mean_token_accuracy": 0.9830864146351814, |
| "step": 1492 |
| }, |
| { |
| "epoch": 8.733137829912023, |
| "grad_norm": 0.40568193564006616, |
| "learning_rate": 5.3816743591120365e-06, |
| "loss": 0.0565, |
| "mean_token_accuracy": 0.9806106314063072, |
| "step": 1493 |
| }, |
| { |
| "epoch": 8.739002932551319, |
| "grad_norm": 0.33951733127086026, |
| "learning_rate": 5.368528480195492e-06, |
| "loss": 0.0574, |
| "mean_token_accuracy": 0.9826337546110153, |
| "step": 1494 |
| }, |
| { |
| "epoch": 8.744868035190615, |
| "grad_norm": 0.27702511846746336, |
| "learning_rate": 5.355442966803544e-06, |
| "loss": 0.0455, |
| "mean_token_accuracy": 0.9840430989861488, |
| "step": 1495 |
| }, |
| { |
| "epoch": 8.750733137829911, |
| "grad_norm": 0.37541384151540996, |
| "learning_rate": 5.342417866431326e-06, |
| "loss": 0.0607, |
| "mean_token_accuracy": 0.9778427630662918, |
| "step": 1496 |
| }, |
| { |
| "epoch": 8.756598240469208, |
| "grad_norm": 0.37394179575858666, |
| "learning_rate": 5.329453226354692e-06, |
| "loss": 0.0584, |
| "mean_token_accuracy": 0.9819178581237793, |
| "step": 1497 |
| }, |
| { |
| "epoch": 8.762463343108504, |
| "grad_norm": 0.34900072180139713, |
| "learning_rate": 5.31654909363005e-06, |
| "loss": 0.055, |
| "mean_token_accuracy": 0.9832709729671478, |
| "step": 1498 |
| }, |
| { |
| "epoch": 8.7683284457478, |
| "grad_norm": 0.40720615945503313, |
| "learning_rate": 5.303705515094187e-06, |
| "loss": 0.0674, |
| "mean_token_accuracy": 0.9800705909729004, |
| "step": 1499 |
| }, |
| { |
| "epoch": 8.774193548387096, |
| "grad_norm": 0.419760149260306, |
| "learning_rate": 5.290922537364109e-06, |
| "loss": 0.0663, |
| "mean_token_accuracy": 0.9766133427619934, |
| "step": 1500 |
| }, |
| { |
| "epoch": 8.780058651026392, |
| "grad_norm": 0.3368715613053454, |
| "learning_rate": 5.278200206836861e-06, |
| "loss": 0.0578, |
| "mean_token_accuracy": 0.9801322594285011, |
| "step": 1501 |
| }, |
| { |
| "epoch": 8.785923753665688, |
| "grad_norm": 0.3759270192655754, |
| "learning_rate": 5.265538569689365e-06, |
| "loss": 0.0546, |
| "mean_token_accuracy": 0.9813186898827553, |
| "step": 1502 |
| }, |
| { |
| "epoch": 8.791788856304985, |
| "grad_norm": 0.3253811690531697, |
| "learning_rate": 5.25293767187825e-06, |
| "loss": 0.0521, |
| "mean_token_accuracy": 0.9838348925113678, |
| "step": 1503 |
| }, |
| { |
| "epoch": 8.79765395894428, |
| "grad_norm": 0.40681946489582455, |
| "learning_rate": 5.240397559139685e-06, |
| "loss": 0.0582, |
| "mean_token_accuracy": 0.9799980223178864, |
| "step": 1504 |
| }, |
| { |
| "epoch": 8.803519061583577, |
| "grad_norm": 0.3113077584917748, |
| "learning_rate": 5.227918276989215e-06, |
| "loss": 0.0542, |
| "mean_token_accuracy": 0.9809886813163757, |
| "step": 1505 |
| }, |
| { |
| "epoch": 8.809384164222873, |
| "grad_norm": 0.31490285722753797, |
| "learning_rate": 5.2154998707215976e-06, |
| "loss": 0.0537, |
| "mean_token_accuracy": 0.9806480631232262, |
| "step": 1506 |
| }, |
| { |
| "epoch": 8.81524926686217, |
| "grad_norm": 0.3335702216609551, |
| "learning_rate": 5.203142385410628e-06, |
| "loss": 0.0524, |
| "mean_token_accuracy": 0.9840300157666206, |
| "step": 1507 |
| }, |
| { |
| "epoch": 8.821114369501466, |
| "grad_norm": 0.4849332504235522, |
| "learning_rate": 5.190845865908987e-06, |
| "loss": 0.0528, |
| "mean_token_accuracy": 0.9802124425768852, |
| "step": 1508 |
| }, |
| { |
| "epoch": 8.826979472140762, |
| "grad_norm": 0.3769541452912475, |
| "learning_rate": 5.178610356848075e-06, |
| "loss": 0.0567, |
| "mean_token_accuracy": 0.9822128117084503, |
| "step": 1509 |
| }, |
| { |
| "epoch": 8.832844574780058, |
| "grad_norm": 0.37508843809475206, |
| "learning_rate": 5.166435902637848e-06, |
| "loss": 0.0517, |
| "mean_token_accuracy": 0.9822444394230843, |
| "step": 1510 |
| }, |
| { |
| "epoch": 8.838709677419354, |
| "grad_norm": 0.3221405938386644, |
| "learning_rate": 5.154322547466658e-06, |
| "loss": 0.0517, |
| "mean_token_accuracy": 0.9838709086179733, |
| "step": 1511 |
| }, |
| { |
| "epoch": 8.84457478005865, |
| "grad_norm": 0.3561073312103975, |
| "learning_rate": 5.142270335301095e-06, |
| "loss": 0.0517, |
| "mean_token_accuracy": 0.9825182780623436, |
| "step": 1512 |
| }, |
| { |
| "epoch": 8.850439882697946, |
| "grad_norm": 0.34042481371976013, |
| "learning_rate": 5.130279309885817e-06, |
| "loss": 0.0527, |
| "mean_token_accuracy": 0.9820515289902687, |
| "step": 1513 |
| }, |
| { |
| "epoch": 8.856304985337243, |
| "grad_norm": 0.4953533693345446, |
| "learning_rate": 5.118349514743404e-06, |
| "loss": 0.064, |
| "mean_token_accuracy": 0.979572020471096, |
| "step": 1514 |
| }, |
| { |
| "epoch": 8.862170087976539, |
| "grad_norm": 0.45447927852562464, |
| "learning_rate": 5.1064809931741975e-06, |
| "loss": 0.0669, |
| "mean_token_accuracy": 0.9793067052960396, |
| "step": 1515 |
| }, |
| { |
| "epoch": 8.868035190615835, |
| "grad_norm": 0.31567146504603205, |
| "learning_rate": 5.094673788256137e-06, |
| "loss": 0.0519, |
| "mean_token_accuracy": 0.9847134873270988, |
| "step": 1516 |
| }, |
| { |
| "epoch": 8.873900293255131, |
| "grad_norm": 0.41749467992256695, |
| "learning_rate": 5.082927942844603e-06, |
| "loss": 0.062, |
| "mean_token_accuracy": 0.9803328365087509, |
| "step": 1517 |
| }, |
| { |
| "epoch": 8.879765395894427, |
| "grad_norm": 0.3155755372513205, |
| "learning_rate": 5.0712434995722734e-06, |
| "loss": 0.056, |
| "mean_token_accuracy": 0.9794782549142838, |
| "step": 1518 |
| }, |
| { |
| "epoch": 8.885630498533724, |
| "grad_norm": 0.38709916701582664, |
| "learning_rate": 5.059620500848964e-06, |
| "loss": 0.057, |
| "mean_token_accuracy": 0.9829104915261269, |
| "step": 1519 |
| }, |
| { |
| "epoch": 8.89149560117302, |
| "grad_norm": 0.35821044241742506, |
| "learning_rate": 5.048058988861455e-06, |
| "loss": 0.0562, |
| "mean_token_accuracy": 0.9820261895656586, |
| "step": 1520 |
| }, |
| { |
| "epoch": 8.897360703812316, |
| "grad_norm": 0.3115758973353694, |
| "learning_rate": 5.0365590055733715e-06, |
| "loss": 0.0525, |
| "mean_token_accuracy": 0.9838507696986198, |
| "step": 1521 |
| }, |
| { |
| "epoch": 8.903225806451612, |
| "grad_norm": 0.399494434274281, |
| "learning_rate": 5.025120592725009e-06, |
| "loss": 0.0622, |
| "mean_token_accuracy": 0.980305053293705, |
| "step": 1522 |
| }, |
| { |
| "epoch": 8.909090909090908, |
| "grad_norm": 0.38255414855046266, |
| "learning_rate": 5.013743791833187e-06, |
| "loss": 0.0581, |
| "mean_token_accuracy": 0.9823561608791351, |
| "step": 1523 |
| }, |
| { |
| "epoch": 8.914956011730204, |
| "grad_norm": 0.3281961203877841, |
| "learning_rate": 5.002428644191094e-06, |
| "loss": 0.0557, |
| "mean_token_accuracy": 0.9815320670604706, |
| "step": 1524 |
| }, |
| { |
| "epoch": 8.9208211143695, |
| "grad_norm": 0.3245875923187815, |
| "learning_rate": 4.991175190868148e-06, |
| "loss": 0.0573, |
| "mean_token_accuracy": 0.9829541444778442, |
| "step": 1525 |
| }, |
| { |
| "epoch": 8.926686217008797, |
| "grad_norm": 0.3390424334484287, |
| "learning_rate": 4.9799834727098415e-06, |
| "loss": 0.0501, |
| "mean_token_accuracy": 0.9831917360424995, |
| "step": 1526 |
| }, |
| { |
| "epoch": 8.932551319648093, |
| "grad_norm": 0.3645654304118876, |
| "learning_rate": 4.968853530337587e-06, |
| "loss": 0.0584, |
| "mean_token_accuracy": 0.9815365374088287, |
| "step": 1527 |
| }, |
| { |
| "epoch": 8.93841642228739, |
| "grad_norm": 0.29681078383119797, |
| "learning_rate": 4.957785404148585e-06, |
| "loss": 0.0487, |
| "mean_token_accuracy": 0.9808258190751076, |
| "step": 1528 |
| }, |
| { |
| "epoch": 8.944281524926687, |
| "grad_norm": 0.3672868154121987, |
| "learning_rate": 4.946779134315662e-06, |
| "loss": 0.0609, |
| "mean_token_accuracy": 0.9804074466228485, |
| "step": 1529 |
| }, |
| { |
| "epoch": 8.950146627565982, |
| "grad_norm": 0.42562270878531583, |
| "learning_rate": 4.935834760787133e-06, |
| "loss": 0.0588, |
| "mean_token_accuracy": 0.9829668998718262, |
| "step": 1530 |
| }, |
| { |
| "epoch": 8.95601173020528, |
| "grad_norm": 0.3675536193118254, |
| "learning_rate": 4.924952323286651e-06, |
| "loss": 0.0561, |
| "mean_token_accuracy": 0.9807698279619217, |
| "step": 1531 |
| }, |
| { |
| "epoch": 8.961876832844574, |
| "grad_norm": 0.36281675499172533, |
| "learning_rate": 4.91413186131307e-06, |
| "loss": 0.0566, |
| "mean_token_accuracy": 0.9821875244379044, |
| "step": 1532 |
| }, |
| { |
| "epoch": 8.967741935483872, |
| "grad_norm": 0.34587044361172914, |
| "learning_rate": 4.9033734141402964e-06, |
| "loss": 0.0571, |
| "mean_token_accuracy": 0.9813329204916954, |
| "step": 1533 |
| }, |
| { |
| "epoch": 8.973607038123166, |
| "grad_norm": 0.32779198026920486, |
| "learning_rate": 4.892677020817151e-06, |
| "loss": 0.0545, |
| "mean_token_accuracy": 0.9812613651156425, |
| "step": 1534 |
| }, |
| { |
| "epoch": 8.979472140762464, |
| "grad_norm": 0.372802439575984, |
| "learning_rate": 4.8820427201672195e-06, |
| "loss": 0.0563, |
| "mean_token_accuracy": 0.9804784283041954, |
| "step": 1535 |
| }, |
| { |
| "epoch": 8.985337243401759, |
| "grad_norm": 0.41235127004454536, |
| "learning_rate": 4.871470550788717e-06, |
| "loss": 0.063, |
| "mean_token_accuracy": 0.9769391268491745, |
| "step": 1536 |
| }, |
| { |
| "epoch": 8.991202346041057, |
| "grad_norm": 0.35330775445880935, |
| "learning_rate": 4.860960551054352e-06, |
| "loss": 0.0576, |
| "mean_token_accuracy": 0.9815262779593468, |
| "step": 1537 |
| }, |
| { |
| "epoch": 8.997067448680351, |
| "grad_norm": 0.3310851925728053, |
| "learning_rate": 4.850512759111177e-06, |
| "loss": 0.0554, |
| "mean_token_accuracy": 0.9816920757293701, |
| "step": 1538 |
| }, |
| { |
| "epoch": 9.0, |
| "grad_norm": 0.3310851925728053, |
| "learning_rate": 4.840127212880457e-06, |
| "loss": 0.0489, |
| "mean_token_accuracy": 0.9838157296180725, |
| "step": 1539 |
| }, |
| { |
| "epoch": 9.005865102639296, |
| "grad_norm": 0.4652301353877543, |
| "learning_rate": 4.82980395005753e-06, |
| "loss": 0.0539, |
| "mean_token_accuracy": 0.9824711456894875, |
| "step": 1540 |
| }, |
| { |
| "epoch": 9.011730205278592, |
| "grad_norm": 0.3871394835094803, |
| "learning_rate": 4.8195430081116715e-06, |
| "loss": 0.0566, |
| "mean_token_accuracy": 0.9818312674760818, |
| "step": 1541 |
| }, |
| { |
| "epoch": 9.017595307917889, |
| "grad_norm": 0.34228612380666135, |
| "learning_rate": 4.809344424285959e-06, |
| "loss": 0.0467, |
| "mean_token_accuracy": 0.9852696433663368, |
| "step": 1542 |
| }, |
| { |
| "epoch": 9.023460410557185, |
| "grad_norm": 0.3733639316356061, |
| "learning_rate": 4.799208235597129e-06, |
| "loss": 0.0579, |
| "mean_token_accuracy": 0.9798446521162987, |
| "step": 1543 |
| }, |
| { |
| "epoch": 9.029325513196481, |
| "grad_norm": 0.37824565482765427, |
| "learning_rate": 4.7891344788354535e-06, |
| "loss": 0.0546, |
| "mean_token_accuracy": 0.9813016727566719, |
| "step": 1544 |
| }, |
| { |
| "epoch": 9.035190615835777, |
| "grad_norm": 0.3711710253715166, |
| "learning_rate": 4.779123190564601e-06, |
| "loss": 0.0615, |
| "mean_token_accuracy": 0.9825925230979919, |
| "step": 1545 |
| }, |
| { |
| "epoch": 9.041055718475073, |
| "grad_norm": 0.363510890139849, |
| "learning_rate": 4.769174407121508e-06, |
| "loss": 0.0514, |
| "mean_token_accuracy": 0.982509970664978, |
| "step": 1546 |
| }, |
| { |
| "epoch": 9.04692082111437, |
| "grad_norm": 0.3243226197829895, |
| "learning_rate": 4.7592881646162336e-06, |
| "loss": 0.0623, |
| "mean_token_accuracy": 0.9804322570562363, |
| "step": 1547 |
| }, |
| { |
| "epoch": 9.052785923753666, |
| "grad_norm": 0.3894419167340949, |
| "learning_rate": 4.749464498931852e-06, |
| "loss": 0.0456, |
| "mean_token_accuracy": 0.9836910218000412, |
| "step": 1548 |
| }, |
| { |
| "epoch": 9.058651026392962, |
| "grad_norm": 0.3150320531942675, |
| "learning_rate": 4.739703445724296e-06, |
| "loss": 0.0538, |
| "mean_token_accuracy": 0.9851875305175781, |
| "step": 1549 |
| }, |
| { |
| "epoch": 9.064516129032258, |
| "grad_norm": 0.32325587902059943, |
| "learning_rate": 4.730005040422253e-06, |
| "loss": 0.0491, |
| "mean_token_accuracy": 0.9839693009853363, |
| "step": 1550 |
| }, |
| { |
| "epoch": 9.070381231671554, |
| "grad_norm": 0.33343639062744773, |
| "learning_rate": 4.720369318227014e-06, |
| "loss": 0.049, |
| "mean_token_accuracy": 0.9838002175092697, |
| "step": 1551 |
| }, |
| { |
| "epoch": 9.07624633431085, |
| "grad_norm": 0.3335968019145587, |
| "learning_rate": 4.710796314112358e-06, |
| "loss": 0.0527, |
| "mean_token_accuracy": 0.9830645993351936, |
| "step": 1552 |
| }, |
| { |
| "epoch": 9.082111436950147, |
| "grad_norm": 0.35686275798991973, |
| "learning_rate": 4.701286062824425e-06, |
| "loss": 0.0507, |
| "mean_token_accuracy": 0.9826326817274094, |
| "step": 1553 |
| }, |
| { |
| "epoch": 9.087976539589443, |
| "grad_norm": 0.39345801699794963, |
| "learning_rate": 4.691838598881587e-06, |
| "loss": 0.0547, |
| "mean_token_accuracy": 0.9825649484992027, |
| "step": 1554 |
| }, |
| { |
| "epoch": 9.093841642228739, |
| "grad_norm": 0.3179764795801783, |
| "learning_rate": 4.68245395657432e-06, |
| "loss": 0.0518, |
| "mean_token_accuracy": 0.9854598566889763, |
| "step": 1555 |
| }, |
| { |
| "epoch": 9.099706744868035, |
| "grad_norm": 0.3236361805001554, |
| "learning_rate": 4.673132169965089e-06, |
| "loss": 0.0509, |
| "mean_token_accuracy": 0.9836238846182823, |
| "step": 1556 |
| }, |
| { |
| "epoch": 9.105571847507331, |
| "grad_norm": 0.30731096453465834, |
| "learning_rate": 4.663873272888212e-06, |
| "loss": 0.0474, |
| "mean_token_accuracy": 0.986683115363121, |
| "step": 1557 |
| }, |
| { |
| "epoch": 9.111436950146627, |
| "grad_norm": 0.306870446012294, |
| "learning_rate": 4.654677298949746e-06, |
| "loss": 0.0514, |
| "mean_token_accuracy": 0.9820300340652466, |
| "step": 1558 |
| }, |
| { |
| "epoch": 9.117302052785924, |
| "grad_norm": 0.3221981747722701, |
| "learning_rate": 4.645544281527362e-06, |
| "loss": 0.0506, |
| "mean_token_accuracy": 0.9824572280049324, |
| "step": 1559 |
| }, |
| { |
| "epoch": 9.12316715542522, |
| "grad_norm": 0.30207536729210593, |
| "learning_rate": 4.636474253770226e-06, |
| "loss": 0.0453, |
| "mean_token_accuracy": 0.9838585555553436, |
| "step": 1560 |
| }, |
| { |
| "epoch": 9.129032258064516, |
| "grad_norm": 0.3241825961068261, |
| "learning_rate": 4.627467248598876e-06, |
| "loss": 0.0524, |
| "mean_token_accuracy": 0.9826252236962318, |
| "step": 1561 |
| }, |
| { |
| "epoch": 9.134897360703812, |
| "grad_norm": 0.3384107887549902, |
| "learning_rate": 4.618523298705101e-06, |
| "loss": 0.0512, |
| "mean_token_accuracy": 0.9828469306230545, |
| "step": 1562 |
| }, |
| { |
| "epoch": 9.140762463343108, |
| "grad_norm": 0.3418975656638684, |
| "learning_rate": 4.609642436551828e-06, |
| "loss": 0.0503, |
| "mean_token_accuracy": 0.9833681285381317, |
| "step": 1563 |
| }, |
| { |
| "epoch": 9.146627565982405, |
| "grad_norm": 0.3144109232730043, |
| "learning_rate": 4.600824694373e-06, |
| "loss": 0.0476, |
| "mean_token_accuracy": 0.9852636978030205, |
| "step": 1564 |
| }, |
| { |
| "epoch": 9.1524926686217, |
| "grad_norm": 0.34715116832048837, |
| "learning_rate": 4.592070104173461e-06, |
| "loss": 0.0502, |
| "mean_token_accuracy": 0.9837682098150253, |
| "step": 1565 |
| }, |
| { |
| "epoch": 9.158357771260997, |
| "grad_norm": 0.32008000943713794, |
| "learning_rate": 4.583378697728835e-06, |
| "loss": 0.0523, |
| "mean_token_accuracy": 0.9823531731963158, |
| "step": 1566 |
| }, |
| { |
| "epoch": 9.164222873900293, |
| "grad_norm": 0.3357093036143234, |
| "learning_rate": 4.574750506585419e-06, |
| "loss": 0.0478, |
| "mean_token_accuracy": 0.982313483953476, |
| "step": 1567 |
| }, |
| { |
| "epoch": 9.17008797653959, |
| "grad_norm": 0.35978845877064625, |
| "learning_rate": 4.566185562060062e-06, |
| "loss": 0.0562, |
| "mean_token_accuracy": 0.9809844046831131, |
| "step": 1568 |
| }, |
| { |
| "epoch": 9.175953079178885, |
| "grad_norm": 0.33646551155384047, |
| "learning_rate": 4.557683895240052e-06, |
| "loss": 0.0552, |
| "mean_token_accuracy": 0.9836227148771286, |
| "step": 1569 |
| }, |
| { |
| "epoch": 9.181818181818182, |
| "grad_norm": 0.4084517717867541, |
| "learning_rate": 4.549245536983009e-06, |
| "loss": 0.0523, |
| "mean_token_accuracy": 0.9832220077514648, |
| "step": 1570 |
| }, |
| { |
| "epoch": 9.187683284457478, |
| "grad_norm": 0.3800460400014487, |
| "learning_rate": 4.540870517916765e-06, |
| "loss": 0.0515, |
| "mean_token_accuracy": 0.9843539819121361, |
| "step": 1571 |
| }, |
| { |
| "epoch": 9.193548387096774, |
| "grad_norm": 0.3875506291047543, |
| "learning_rate": 4.532558868439249e-06, |
| "loss": 0.0538, |
| "mean_token_accuracy": 0.9836299493908882, |
| "step": 1572 |
| }, |
| { |
| "epoch": 9.19941348973607, |
| "grad_norm": 0.3339053484134062, |
| "learning_rate": 4.524310618718403e-06, |
| "loss": 0.0506, |
| "mean_token_accuracy": 0.9838271215558052, |
| "step": 1573 |
| }, |
| { |
| "epoch": 9.205278592375366, |
| "grad_norm": 0.3442412710070444, |
| "learning_rate": 4.516125798692037e-06, |
| "loss": 0.052, |
| "mean_token_accuracy": 0.983096070587635, |
| "step": 1574 |
| }, |
| { |
| "epoch": 9.211143695014663, |
| "grad_norm": 0.3751785309738875, |
| "learning_rate": 4.508004438067742e-06, |
| "loss": 0.0556, |
| "mean_token_accuracy": 0.9821300804615021, |
| "step": 1575 |
| }, |
| { |
| "epoch": 9.217008797653959, |
| "grad_norm": 0.3431266472138059, |
| "learning_rate": 4.4999465663227785e-06, |
| "loss": 0.0487, |
| "mean_token_accuracy": 0.9844409078359604, |
| "step": 1576 |
| }, |
| { |
| "epoch": 9.222873900293255, |
| "grad_norm": 0.31684104303608773, |
| "learning_rate": 4.491952212703964e-06, |
| "loss": 0.0522, |
| "mean_token_accuracy": 0.9832335263490677, |
| "step": 1577 |
| }, |
| { |
| "epoch": 9.228739002932551, |
| "grad_norm": 0.3484048489842224, |
| "learning_rate": 4.484021406227576e-06, |
| "loss": 0.0521, |
| "mean_token_accuracy": 0.9830200746655464, |
| "step": 1578 |
| }, |
| { |
| "epoch": 9.234604105571847, |
| "grad_norm": 0.37348106814548215, |
| "learning_rate": 4.476154175679239e-06, |
| "loss": 0.0566, |
| "mean_token_accuracy": 0.9807815030217171, |
| "step": 1579 |
| }, |
| { |
| "epoch": 9.240469208211143, |
| "grad_norm": 0.3465806459457609, |
| "learning_rate": 4.468350549613822e-06, |
| "loss": 0.0449, |
| "mean_token_accuracy": 0.9852471351623535, |
| "step": 1580 |
| }, |
| { |
| "epoch": 9.24633431085044, |
| "grad_norm": 0.3801664719699823, |
| "learning_rate": 4.460610556355333e-06, |
| "loss": 0.0576, |
| "mean_token_accuracy": 0.9800804182887077, |
| "step": 1581 |
| }, |
| { |
| "epoch": 9.252199413489736, |
| "grad_norm": 0.3195371027598601, |
| "learning_rate": 4.452934223996824e-06, |
| "loss": 0.0483, |
| "mean_token_accuracy": 0.9841778427362442, |
| "step": 1582 |
| }, |
| { |
| "epoch": 9.258064516129032, |
| "grad_norm": 0.31523364079250155, |
| "learning_rate": 4.445321580400281e-06, |
| "loss": 0.0504, |
| "mean_token_accuracy": 0.9817590713500977, |
| "step": 1583 |
| }, |
| { |
| "epoch": 9.263929618768328, |
| "grad_norm": 0.34571299346795814, |
| "learning_rate": 4.437772653196527e-06, |
| "loss": 0.0555, |
| "mean_token_accuracy": 0.9840084314346313, |
| "step": 1584 |
| }, |
| { |
| "epoch": 9.269794721407624, |
| "grad_norm": 0.3826595302078001, |
| "learning_rate": 4.430287469785118e-06, |
| "loss": 0.063, |
| "mean_token_accuracy": 0.9781069308519363, |
| "step": 1585 |
| }, |
| { |
| "epoch": 9.27565982404692, |
| "grad_norm": 0.45325381772938556, |
| "learning_rate": 4.422866057334246e-06, |
| "loss": 0.0571, |
| "mean_token_accuracy": 0.9835334494709969, |
| "step": 1586 |
| }, |
| { |
| "epoch": 9.281524926686217, |
| "grad_norm": 0.3544263748847487, |
| "learning_rate": 4.415508442780642e-06, |
| "loss": 0.0571, |
| "mean_token_accuracy": 0.9812069460749626, |
| "step": 1587 |
| }, |
| { |
| "epoch": 9.287390029325513, |
| "grad_norm": 0.366957343280142, |
| "learning_rate": 4.408214652829473e-06, |
| "loss": 0.0557, |
| "mean_token_accuracy": 0.9829774498939514, |
| "step": 1588 |
| }, |
| { |
| "epoch": 9.29325513196481, |
| "grad_norm": 0.31643775338518065, |
| "learning_rate": 4.400984713954253e-06, |
| "loss": 0.044, |
| "mean_token_accuracy": 0.9852609634399414, |
| "step": 1589 |
| }, |
| { |
| "epoch": 9.299120234604105, |
| "grad_norm": 0.3258433860675811, |
| "learning_rate": 4.39381865239674e-06, |
| "loss": 0.0577, |
| "mean_token_accuracy": 0.9815997928380966, |
| "step": 1590 |
| }, |
| { |
| "epoch": 9.304985337243401, |
| "grad_norm": 0.3809434963849625, |
| "learning_rate": 4.386716494166842e-06, |
| "loss": 0.055, |
| "mean_token_accuracy": 0.981240376830101, |
| "step": 1591 |
| }, |
| { |
| "epoch": 9.310850439882698, |
| "grad_norm": 0.39655430748262316, |
| "learning_rate": 4.379678265042529e-06, |
| "loss": 0.0544, |
| "mean_token_accuracy": 0.9810396283864975, |
| "step": 1592 |
| }, |
| { |
| "epoch": 9.316715542521994, |
| "grad_norm": 0.3706407474165295, |
| "learning_rate": 4.372703990569725e-06, |
| "loss": 0.0543, |
| "mean_token_accuracy": 0.9823009446263313, |
| "step": 1593 |
| }, |
| { |
| "epoch": 9.32258064516129, |
| "grad_norm": 0.36956199306402604, |
| "learning_rate": 4.365793696062231e-06, |
| "loss": 0.056, |
| "mean_token_accuracy": 0.9810444936156273, |
| "step": 1594 |
| }, |
| { |
| "epoch": 9.328445747800586, |
| "grad_norm": 0.36114052744290454, |
| "learning_rate": 4.358947406601626e-06, |
| "loss": 0.0496, |
| "mean_token_accuracy": 0.9843268916010857, |
| "step": 1595 |
| }, |
| { |
| "epoch": 9.334310850439882, |
| "grad_norm": 0.29903243274355235, |
| "learning_rate": 4.352165147037177e-06, |
| "loss": 0.0532, |
| "mean_token_accuracy": 0.9818791821599007, |
| "step": 1596 |
| }, |
| { |
| "epoch": 9.340175953079179, |
| "grad_norm": 0.3402033677627262, |
| "learning_rate": 4.345446941985741e-06, |
| "loss": 0.0497, |
| "mean_token_accuracy": 0.9822636842727661, |
| "step": 1597 |
| }, |
| { |
| "epoch": 9.346041055718475, |
| "grad_norm": 0.3216326624615543, |
| "learning_rate": 4.338792815831698e-06, |
| "loss": 0.05, |
| "mean_token_accuracy": 0.9808618873357773, |
| "step": 1598 |
| }, |
| { |
| "epoch": 9.351906158357771, |
| "grad_norm": 0.3981133789009082, |
| "learning_rate": 4.332202792726832e-06, |
| "loss": 0.06, |
| "mean_token_accuracy": 0.9808676987886429, |
| "step": 1599 |
| }, |
| { |
| "epoch": 9.357771260997067, |
| "grad_norm": 0.3730359853930091, |
| "learning_rate": 4.3256768965902684e-06, |
| "loss": 0.056, |
| "mean_token_accuracy": 0.9798526018857956, |
| "step": 1600 |
| }, |
| { |
| "epoch": 9.363636363636363, |
| "grad_norm": 0.38448222563283363, |
| "learning_rate": 4.319215151108373e-06, |
| "loss": 0.0645, |
| "mean_token_accuracy": 0.9782325327396393, |
| "step": 1601 |
| }, |
| { |
| "epoch": 9.36950146627566, |
| "grad_norm": 0.34869744775882194, |
| "learning_rate": 4.312817579734673e-06, |
| "loss": 0.0515, |
| "mean_token_accuracy": 0.9844043850898743, |
| "step": 1602 |
| }, |
| { |
| "epoch": 9.375366568914956, |
| "grad_norm": 0.35794045824975934, |
| "learning_rate": 4.306484205689768e-06, |
| "loss": 0.0575, |
| "mean_token_accuracy": 0.9812995940446854, |
| "step": 1603 |
| }, |
| { |
| "epoch": 9.381231671554252, |
| "grad_norm": 0.3536277186740108, |
| "learning_rate": 4.300215051961248e-06, |
| "loss": 0.0574, |
| "mean_token_accuracy": 0.9826265349984169, |
| "step": 1604 |
| }, |
| { |
| "epoch": 9.387096774193548, |
| "grad_norm": 0.34843689510087195, |
| "learning_rate": 4.2940101413036115e-06, |
| "loss": 0.0499, |
| "mean_token_accuracy": 0.9845296069979668, |
| "step": 1605 |
| }, |
| { |
| "epoch": 9.392961876832844, |
| "grad_norm": 0.3464031945943138, |
| "learning_rate": 4.287869496238174e-06, |
| "loss": 0.0597, |
| "mean_token_accuracy": 0.9806225821375847, |
| "step": 1606 |
| }, |
| { |
| "epoch": 9.39882697947214, |
| "grad_norm": 0.33197516109156383, |
| "learning_rate": 4.281793139053001e-06, |
| "loss": 0.0522, |
| "mean_token_accuracy": 0.9814345613121986, |
| "step": 1607 |
| }, |
| { |
| "epoch": 9.404692082111437, |
| "grad_norm": 0.38784078972064945, |
| "learning_rate": 4.275781091802811e-06, |
| "loss": 0.0671, |
| "mean_token_accuracy": 0.9796320497989655, |
| "step": 1608 |
| }, |
| { |
| "epoch": 9.410557184750733, |
| "grad_norm": 0.4182267076466508, |
| "learning_rate": 4.26983337630891e-06, |
| "loss": 0.0552, |
| "mean_token_accuracy": 0.9823887571692467, |
| "step": 1609 |
| }, |
| { |
| "epoch": 9.416422287390029, |
| "grad_norm": 0.40263024742952774, |
| "learning_rate": 4.263950014159103e-06, |
| "loss": 0.0566, |
| "mean_token_accuracy": 0.9805941879749298, |
| "step": 1610 |
| }, |
| { |
| "epoch": 9.422287390029325, |
| "grad_norm": 0.3184552151743527, |
| "learning_rate": 4.258131026707618e-06, |
| "loss": 0.0486, |
| "mean_token_accuracy": 0.9834803640842438, |
| "step": 1611 |
| }, |
| { |
| "epoch": 9.428152492668621, |
| "grad_norm": 0.34850095126657826, |
| "learning_rate": 4.2523764350750305e-06, |
| "loss": 0.0577, |
| "mean_token_accuracy": 0.9823050573468208, |
| "step": 1612 |
| }, |
| { |
| "epoch": 9.434017595307918, |
| "grad_norm": 0.35877944489864333, |
| "learning_rate": 4.246686260148179e-06, |
| "loss": 0.0529, |
| "mean_token_accuracy": 0.9828868806362152, |
| "step": 1613 |
| }, |
| { |
| "epoch": 9.439882697947214, |
| "grad_norm": 0.41578422834006884, |
| "learning_rate": 4.241060522580108e-06, |
| "loss": 0.0644, |
| "mean_token_accuracy": 0.9790749028325081, |
| "step": 1614 |
| }, |
| { |
| "epoch": 9.44574780058651, |
| "grad_norm": 0.3807165143700813, |
| "learning_rate": 4.2354992427899674e-06, |
| "loss": 0.0504, |
| "mean_token_accuracy": 0.9833511561155319, |
| "step": 1615 |
| }, |
| { |
| "epoch": 9.451612903225806, |
| "grad_norm": 0.3508137711756749, |
| "learning_rate": 4.23000244096296e-06, |
| "loss": 0.053, |
| "mean_token_accuracy": 0.9821149632334709, |
| "step": 1616 |
| }, |
| { |
| "epoch": 9.457478005865102, |
| "grad_norm": 0.34790844523575315, |
| "learning_rate": 4.224570137050254e-06, |
| "loss": 0.0439, |
| "mean_token_accuracy": 0.9859587997198105, |
| "step": 1617 |
| }, |
| { |
| "epoch": 9.463343108504398, |
| "grad_norm": 0.3069321947925801, |
| "learning_rate": 4.219202350768919e-06, |
| "loss": 0.0538, |
| "mean_token_accuracy": 0.9819561988115311, |
| "step": 1618 |
| }, |
| { |
| "epoch": 9.469208211143695, |
| "grad_norm": 0.33995148789180835, |
| "learning_rate": 4.213899101601853e-06, |
| "loss": 0.0546, |
| "mean_token_accuracy": 0.9821692854166031, |
| "step": 1619 |
| }, |
| { |
| "epoch": 9.47507331378299, |
| "grad_norm": 0.33004647287525, |
| "learning_rate": 4.208660408797708e-06, |
| "loss": 0.054, |
| "mean_token_accuracy": 0.9837897270917892, |
| "step": 1620 |
| }, |
| { |
| "epoch": 9.480938416422287, |
| "grad_norm": 0.34405905205242354, |
| "learning_rate": 4.203486291370821e-06, |
| "loss": 0.0526, |
| "mean_token_accuracy": 0.9838336259126663, |
| "step": 1621 |
| }, |
| { |
| "epoch": 9.486803519061583, |
| "grad_norm": 0.3825920637539007, |
| "learning_rate": 4.198376768101149e-06, |
| "loss": 0.0611, |
| "mean_token_accuracy": 0.9824720919132233, |
| "step": 1622 |
| }, |
| { |
| "epoch": 9.49266862170088, |
| "grad_norm": 0.39337900293357947, |
| "learning_rate": 4.193331857534198e-06, |
| "loss": 0.0506, |
| "mean_token_accuracy": 0.9829727709293365, |
| "step": 1623 |
| }, |
| { |
| "epoch": 9.498533724340176, |
| "grad_norm": 0.33098384606131753, |
| "learning_rate": 4.188351577980961e-06, |
| "loss": 0.048, |
| "mean_token_accuracy": 0.9843248054385185, |
| "step": 1624 |
| }, |
| { |
| "epoch": 9.504398826979472, |
| "grad_norm": 0.3400217049112287, |
| "learning_rate": 4.183435947517836e-06, |
| "loss": 0.0504, |
| "mean_token_accuracy": 0.9832568988204002, |
| "step": 1625 |
| }, |
| { |
| "epoch": 9.510263929618768, |
| "grad_norm": 0.318142451201231, |
| "learning_rate": 4.178584983986575e-06, |
| "loss": 0.0451, |
| "mean_token_accuracy": 0.9853277578949928, |
| "step": 1626 |
| }, |
| { |
| "epoch": 9.516129032258064, |
| "grad_norm": 0.31249425970359535, |
| "learning_rate": 4.173798704994221e-06, |
| "loss": 0.05, |
| "mean_token_accuracy": 0.9839732199907303, |
| "step": 1627 |
| }, |
| { |
| "epoch": 9.52199413489736, |
| "grad_norm": 0.34213460131037277, |
| "learning_rate": 4.169077127913031e-06, |
| "loss": 0.0569, |
| "mean_token_accuracy": 0.9797552153468132, |
| "step": 1628 |
| }, |
| { |
| "epoch": 9.527859237536656, |
| "grad_norm": 0.34521954141682165, |
| "learning_rate": 4.164420269880422e-06, |
| "loss": 0.0537, |
| "mean_token_accuracy": 0.9796594232320786, |
| "step": 1629 |
| }, |
| { |
| "epoch": 9.533724340175953, |
| "grad_norm": 0.3561614042212147, |
| "learning_rate": 4.159828147798914e-06, |
| "loss": 0.0495, |
| "mean_token_accuracy": 0.9844975918531418, |
| "step": 1630 |
| }, |
| { |
| "epoch": 9.539589442815249, |
| "grad_norm": 0.33458897730895226, |
| "learning_rate": 4.155300778336047e-06, |
| "loss": 0.053, |
| "mean_token_accuracy": 0.9825539439916611, |
| "step": 1631 |
| }, |
| { |
| "epoch": 9.545454545454545, |
| "grad_norm": 0.41386586180304163, |
| "learning_rate": 4.150838177924349e-06, |
| "loss": 0.0516, |
| "mean_token_accuracy": 0.9856827855110168, |
| "step": 1632 |
| }, |
| { |
| "epoch": 9.551319648093841, |
| "grad_norm": 0.2858406837497521, |
| "learning_rate": 4.146440362761256e-06, |
| "loss": 0.0526, |
| "mean_token_accuracy": 0.9834114909172058, |
| "step": 1633 |
| }, |
| { |
| "epoch": 9.557184750733137, |
| "grad_norm": 0.3319322824824308, |
| "learning_rate": 4.142107348809058e-06, |
| "loss": 0.0591, |
| "mean_token_accuracy": 0.9804484695196152, |
| "step": 1634 |
| }, |
| { |
| "epoch": 9.563049853372434, |
| "grad_norm": 0.36873534696604005, |
| "learning_rate": 4.1378391517948505e-06, |
| "loss": 0.0489, |
| "mean_token_accuracy": 0.9854116439819336, |
| "step": 1635 |
| }, |
| { |
| "epoch": 9.56891495601173, |
| "grad_norm": 0.371287763627244, |
| "learning_rate": 4.1336357872104614e-06, |
| "loss": 0.0545, |
| "mean_token_accuracy": 0.982661671936512, |
| "step": 1636 |
| }, |
| { |
| "epoch": 9.574780058651026, |
| "grad_norm": 0.31981684222677925, |
| "learning_rate": 4.12949727031241e-06, |
| "loss": 0.0552, |
| "mean_token_accuracy": 0.9830471277236938, |
| "step": 1637 |
| }, |
| { |
| "epoch": 9.580645161290322, |
| "grad_norm": 0.3496716369106886, |
| "learning_rate": 4.125423616121837e-06, |
| "loss": 0.0492, |
| "mean_token_accuracy": 0.984118863940239, |
| "step": 1638 |
| }, |
| { |
| "epoch": 9.586510263929618, |
| "grad_norm": 0.3019605956233339, |
| "learning_rate": 4.121414839424464e-06, |
| "loss": 0.0512, |
| "mean_token_accuracy": 0.983857087790966, |
| "step": 1639 |
| }, |
| { |
| "epoch": 9.592375366568914, |
| "grad_norm": 0.38743933966546673, |
| "learning_rate": 4.117470954770529e-06, |
| "loss": 0.0575, |
| "mean_token_accuracy": 0.9823267832398415, |
| "step": 1640 |
| }, |
| { |
| "epoch": 9.59824046920821, |
| "grad_norm": 0.2820952797381847, |
| "learning_rate": 4.1135919764747454e-06, |
| "loss": 0.0487, |
| "mean_token_accuracy": 0.9831160977482796, |
| "step": 1641 |
| }, |
| { |
| "epoch": 9.604105571847507, |
| "grad_norm": 0.33032888287751866, |
| "learning_rate": 4.109777918616235e-06, |
| "loss": 0.0546, |
| "mean_token_accuracy": 0.9848859757184982, |
| "step": 1642 |
| }, |
| { |
| "epoch": 9.609970674486803, |
| "grad_norm": 0.3447439897843936, |
| "learning_rate": 4.106028795038487e-06, |
| "loss": 0.0559, |
| "mean_token_accuracy": 0.9810968413949013, |
| "step": 1643 |
| }, |
| { |
| "epoch": 9.6158357771261, |
| "grad_norm": 0.39380441477471423, |
| "learning_rate": 4.102344619349307e-06, |
| "loss": 0.064, |
| "mean_token_accuracy": 0.9785462468862534, |
| "step": 1644 |
| }, |
| { |
| "epoch": 9.621700879765395, |
| "grad_norm": 0.3795143444733662, |
| "learning_rate": 4.098725404920763e-06, |
| "loss": 0.0614, |
| "mean_token_accuracy": 0.9801774621009827, |
| "step": 1645 |
| }, |
| { |
| "epoch": 9.627565982404692, |
| "grad_norm": 0.4239571722097151, |
| "learning_rate": 4.095171164889143e-06, |
| "loss": 0.0524, |
| "mean_token_accuracy": 0.9822197332978249, |
| "step": 1646 |
| }, |
| { |
| "epoch": 9.633431085043988, |
| "grad_norm": 0.3279893196301856, |
| "learning_rate": 4.091681912154903e-06, |
| "loss": 0.0525, |
| "mean_token_accuracy": 0.9810535982251167, |
| "step": 1647 |
| }, |
| { |
| "epoch": 9.639296187683284, |
| "grad_norm": 0.4146256842051382, |
| "learning_rate": 4.088257659382619e-06, |
| "loss": 0.0678, |
| "mean_token_accuracy": 0.9781135395169258, |
| "step": 1648 |
| }, |
| { |
| "epoch": 9.64516129032258, |
| "grad_norm": 0.41514329438471853, |
| "learning_rate": 4.0848984190009495e-06, |
| "loss": 0.0587, |
| "mean_token_accuracy": 0.9801534190773964, |
| "step": 1649 |
| }, |
| { |
| "epoch": 9.651026392961876, |
| "grad_norm": 0.2900172472788311, |
| "learning_rate": 4.081604203202577e-06, |
| "loss": 0.0467, |
| "mean_token_accuracy": 0.9854440614581108, |
| "step": 1650 |
| }, |
| { |
| "epoch": 9.656891495601172, |
| "grad_norm": 0.3171073659721082, |
| "learning_rate": 4.078375023944175e-06, |
| "loss": 0.0528, |
| "mean_token_accuracy": 0.983460322022438, |
| "step": 1651 |
| }, |
| { |
| "epoch": 9.662756598240469, |
| "grad_norm": 0.3618376086906897, |
| "learning_rate": 4.0752108929463625e-06, |
| "loss": 0.0608, |
| "mean_token_accuracy": 0.9778873026371002, |
| "step": 1652 |
| }, |
| { |
| "epoch": 9.668621700879765, |
| "grad_norm": 0.4064811886215041, |
| "learning_rate": 4.072111821693655e-06, |
| "loss": 0.0569, |
| "mean_token_accuracy": 0.982071690261364, |
| "step": 1653 |
| }, |
| { |
| "epoch": 9.674486803519061, |
| "grad_norm": 0.3487674151385284, |
| "learning_rate": 4.069077821434429e-06, |
| "loss": 0.0603, |
| "mean_token_accuracy": 0.9813675135374069, |
| "step": 1654 |
| }, |
| { |
| "epoch": 9.680351906158357, |
| "grad_norm": 0.48683525747641127, |
| "learning_rate": 4.06610890318088e-06, |
| "loss": 0.0505, |
| "mean_token_accuracy": 0.9826664626598358, |
| "step": 1655 |
| }, |
| { |
| "epoch": 9.686217008797653, |
| "grad_norm": 0.27565303241857414, |
| "learning_rate": 4.063205077708986e-06, |
| "loss": 0.0512, |
| "mean_token_accuracy": 0.9829757288098335, |
| "step": 1656 |
| }, |
| { |
| "epoch": 9.69208211143695, |
| "grad_norm": 0.4138815422448716, |
| "learning_rate": 4.060366355558456e-06, |
| "loss": 0.0561, |
| "mean_token_accuracy": 0.9805786311626434, |
| "step": 1657 |
| }, |
| { |
| "epoch": 9.697947214076246, |
| "grad_norm": 0.3521931887112191, |
| "learning_rate": 4.057592747032707e-06, |
| "loss": 0.0651, |
| "mean_token_accuracy": 0.9799509420990944, |
| "step": 1658 |
| }, |
| { |
| "epoch": 9.703812316715542, |
| "grad_norm": 0.36290266721596814, |
| "learning_rate": 4.054884262198816e-06, |
| "loss": 0.0479, |
| "mean_token_accuracy": 0.9829519093036652, |
| "step": 1659 |
| }, |
| { |
| "epoch": 9.709677419354838, |
| "grad_norm": 0.29181690040474056, |
| "learning_rate": 4.052240910887493e-06, |
| "loss": 0.0515, |
| "mean_token_accuracy": 0.9840708523988724, |
| "step": 1660 |
| }, |
| { |
| "epoch": 9.715542521994134, |
| "grad_norm": 0.3198053513374907, |
| "learning_rate": 4.049662702693031e-06, |
| "loss": 0.0517, |
| "mean_token_accuracy": 0.9815531522035599, |
| "step": 1661 |
| }, |
| { |
| "epoch": 9.72140762463343, |
| "grad_norm": 0.3567193819398695, |
| "learning_rate": 4.047149646973288e-06, |
| "loss": 0.0534, |
| "mean_token_accuracy": 0.9812785014510155, |
| "step": 1662 |
| }, |
| { |
| "epoch": 9.727272727272727, |
| "grad_norm": 0.3365250481082472, |
| "learning_rate": 4.044701752849639e-06, |
| "loss": 0.05, |
| "mean_token_accuracy": 0.9830398857593536, |
| "step": 1663 |
| }, |
| { |
| "epoch": 9.733137829912023, |
| "grad_norm": 0.3168117861192591, |
| "learning_rate": 4.042319029206954e-06, |
| "loss": 0.0496, |
| "mean_token_accuracy": 0.9836679548025131, |
| "step": 1664 |
| }, |
| { |
| "epoch": 9.739002932551319, |
| "grad_norm": 0.3083134474689967, |
| "learning_rate": 4.040001484693553e-06, |
| "loss": 0.0487, |
| "mean_token_accuracy": 0.9842707514762878, |
| "step": 1665 |
| }, |
| { |
| "epoch": 9.744868035190615, |
| "grad_norm": 0.3919955952416545, |
| "learning_rate": 4.037749127721191e-06, |
| "loss": 0.0519, |
| "mean_token_accuracy": 0.9838557988405228, |
| "step": 1666 |
| }, |
| { |
| "epoch": 9.750733137829911, |
| "grad_norm": 0.29872178905205415, |
| "learning_rate": 4.03556196646501e-06, |
| "loss": 0.049, |
| "mean_token_accuracy": 0.9841638430953026, |
| "step": 1667 |
| }, |
| { |
| "epoch": 9.756598240469208, |
| "grad_norm": 0.33607599431555435, |
| "learning_rate": 4.033440008863528e-06, |
| "loss": 0.0584, |
| "mean_token_accuracy": 0.9812700152397156, |
| "step": 1668 |
| }, |
| { |
| "epoch": 9.762463343108504, |
| "grad_norm": 0.3394818397662197, |
| "learning_rate": 4.031383262618588e-06, |
| "loss": 0.0586, |
| "mean_token_accuracy": 0.9813343957066536, |
| "step": 1669 |
| }, |
| { |
| "epoch": 9.7683284457478, |
| "grad_norm": 0.38828667419079976, |
| "learning_rate": 4.0293917351953505e-06, |
| "loss": 0.0538, |
| "mean_token_accuracy": 0.9834257811307907, |
| "step": 1670 |
| }, |
| { |
| "epoch": 9.774193548387096, |
| "grad_norm": 0.33338280065712544, |
| "learning_rate": 4.027465433822255e-06, |
| "loss": 0.0504, |
| "mean_token_accuracy": 0.98256666213274, |
| "step": 1671 |
| }, |
| { |
| "epoch": 9.780058651026392, |
| "grad_norm": 0.3411185677369752, |
| "learning_rate": 4.025604365490999e-06, |
| "loss": 0.0525, |
| "mean_token_accuracy": 0.9825597852468491, |
| "step": 1672 |
| }, |
| { |
| "epoch": 9.785923753665688, |
| "grad_norm": 0.3212285265038135, |
| "learning_rate": 4.0238085369565085e-06, |
| "loss": 0.0522, |
| "mean_token_accuracy": 0.9835484176874161, |
| "step": 1673 |
| }, |
| { |
| "epoch": 9.791788856304985, |
| "grad_norm": 0.294204389979182, |
| "learning_rate": 4.022077954736916e-06, |
| "loss": 0.0519, |
| "mean_token_accuracy": 0.9838820695877075, |
| "step": 1674 |
| }, |
| { |
| "epoch": 9.79765395894428, |
| "grad_norm": 0.38254802196013876, |
| "learning_rate": 4.020412625113535e-06, |
| "loss": 0.053, |
| "mean_token_accuracy": 0.9837944954633713, |
| "step": 1675 |
| }, |
| { |
| "epoch": 9.803519061583577, |
| "grad_norm": 0.36617266625723327, |
| "learning_rate": 4.018812554130839e-06, |
| "loss": 0.0638, |
| "mean_token_accuracy": 0.9815091416239738, |
| "step": 1676 |
| }, |
| { |
| "epoch": 9.809384164222873, |
| "grad_norm": 0.39530260374994275, |
| "learning_rate": 4.01727774759644e-06, |
| "loss": 0.0568, |
| "mean_token_accuracy": 0.9812266975641251, |
| "step": 1677 |
| }, |
| { |
| "epoch": 9.81524926686217, |
| "grad_norm": 0.36224793191945875, |
| "learning_rate": 4.0158082110810695e-06, |
| "loss": 0.0506, |
| "mean_token_accuracy": 0.9833802804350853, |
| "step": 1678 |
| }, |
| { |
| "epoch": 9.821114369501466, |
| "grad_norm": 0.3461078001305434, |
| "learning_rate": 4.014403949918545e-06, |
| "loss": 0.0518, |
| "mean_token_accuracy": 0.9832278341054916, |
| "step": 1679 |
| }, |
| { |
| "epoch": 9.826979472140762, |
| "grad_norm": 0.3639700603722806, |
| "learning_rate": 4.0130649692057715e-06, |
| "loss": 0.0556, |
| "mean_token_accuracy": 0.9809284582734108, |
| "step": 1680 |
| }, |
| { |
| "epoch": 9.832844574780058, |
| "grad_norm": 0.3633034118576501, |
| "learning_rate": 4.01179127380271e-06, |
| "loss": 0.0585, |
| "mean_token_accuracy": 0.9802731797099113, |
| "step": 1681 |
| }, |
| { |
| "epoch": 9.838709677419354, |
| "grad_norm": 0.32362509615190976, |
| "learning_rate": 4.010582868332353e-06, |
| "loss": 0.0474, |
| "mean_token_accuracy": 0.9848815277218819, |
| "step": 1682 |
| }, |
| { |
| "epoch": 9.84457478005865, |
| "grad_norm": 0.3477758631730095, |
| "learning_rate": 4.009439757180732e-06, |
| "loss": 0.0541, |
| "mean_token_accuracy": 0.9802240058779716, |
| "step": 1683 |
| }, |
| { |
| "epoch": 9.850439882697946, |
| "grad_norm": 0.3854795876675265, |
| "learning_rate": 4.008361944496875e-06, |
| "loss": 0.0545, |
| "mean_token_accuracy": 0.982623852789402, |
| "step": 1684 |
| }, |
| { |
| "epoch": 9.856304985337243, |
| "grad_norm": 0.3885500734464125, |
| "learning_rate": 4.00734943419281e-06, |
| "loss": 0.062, |
| "mean_token_accuracy": 0.9794270023703575, |
| "step": 1685 |
| }, |
| { |
| "epoch": 9.862170087976539, |
| "grad_norm": 0.38759202956879846, |
| "learning_rate": 4.006402229943534e-06, |
| "loss": 0.0549, |
| "mean_token_accuracy": 0.9819841310381889, |
| "step": 1686 |
| }, |
| { |
| "epoch": 9.868035190615835, |
| "grad_norm": 0.3150886823164933, |
| "learning_rate": 4.005520335187023e-06, |
| "loss": 0.0564, |
| "mean_token_accuracy": 0.9822871387004852, |
| "step": 1687 |
| }, |
| { |
| "epoch": 9.873900293255131, |
| "grad_norm": 0.35350832587231334, |
| "learning_rate": 4.004703753124195e-06, |
| "loss": 0.0565, |
| "mean_token_accuracy": 0.9831016659736633, |
| "step": 1688 |
| }, |
| { |
| "epoch": 9.879765395894427, |
| "grad_norm": 0.3193894482554477, |
| "learning_rate": 4.003952486718913e-06, |
| "loss": 0.0485, |
| "mean_token_accuracy": 0.9836105778813362, |
| "step": 1689 |
| }, |
| { |
| "epoch": 9.885630498533724, |
| "grad_norm": 0.30675392858895184, |
| "learning_rate": 4.003266538697973e-06, |
| "loss": 0.0523, |
| "mean_token_accuracy": 0.9822279661893845, |
| "step": 1690 |
| }, |
| { |
| "epoch": 9.89149560117302, |
| "grad_norm": 0.31085526491264526, |
| "learning_rate": 4.002645911551086e-06, |
| "loss": 0.0486, |
| "mean_token_accuracy": 0.9831148758530617, |
| "step": 1691 |
| }, |
| { |
| "epoch": 9.897360703812316, |
| "grad_norm": 0.310609074510971, |
| "learning_rate": 4.002090607530882e-06, |
| "loss": 0.0536, |
| "mean_token_accuracy": 0.9823655262589455, |
| "step": 1692 |
| }, |
| { |
| "epoch": 9.903225806451612, |
| "grad_norm": 0.5082679092863019, |
| "learning_rate": 4.001600628652887e-06, |
| "loss": 0.0684, |
| "mean_token_accuracy": 0.9778107851743698, |
| "step": 1693 |
| }, |
| { |
| "epoch": 9.909090909090908, |
| "grad_norm": 0.3580467978873467, |
| "learning_rate": 4.001175976695527e-06, |
| "loss": 0.0587, |
| "mean_token_accuracy": 0.9787137806415558, |
| "step": 1694 |
| }, |
| { |
| "epoch": 9.914956011730204, |
| "grad_norm": 0.3474754641798292, |
| "learning_rate": 4.000816653200117e-06, |
| "loss": 0.047, |
| "mean_token_accuracy": 0.9860777705907822, |
| "step": 1695 |
| }, |
| { |
| "epoch": 9.9208211143695, |
| "grad_norm": 0.3480833124779761, |
| "learning_rate": 4.000522659470857e-06, |
| "loss": 0.0521, |
| "mean_token_accuracy": 0.9829437881708145, |
| "step": 1696 |
| }, |
| { |
| "epoch": 9.926686217008797, |
| "grad_norm": 0.4096410573101437, |
| "learning_rate": 4.000293996574826e-06, |
| "loss": 0.0666, |
| "mean_token_accuracy": 0.9793807342648506, |
| "step": 1697 |
| }, |
| { |
| "epoch": 9.932551319648093, |
| "grad_norm": 0.3812968594988111, |
| "learning_rate": 4.000130665341977e-06, |
| "loss": 0.0637, |
| "mean_token_accuracy": 0.9800157248973846, |
| "step": 1698 |
| }, |
| { |
| "epoch": 9.93841642228739, |
| "grad_norm": 0.3406586288767792, |
| "learning_rate": 4.000032666365136e-06, |
| "loss": 0.0514, |
| "mean_token_accuracy": 0.9838827252388, |
| "step": 1699 |
| }, |
| { |
| "epoch": 9.944281524926687, |
| "grad_norm": 0.32809422893372536, |
| "learning_rate": 4.000000000000001e-06, |
| "loss": 0.0515, |
| "mean_token_accuracy": 0.9818282052874565, |
| "step": 1700 |
| }, |
| { |
| "epoch": 9.944281524926687, |
| "step": 1700, |
| "total_flos": 18857631034368.0, |
| "train_loss": 0.18426384230746942, |
| "train_runtime": 35740.9083, |
| "train_samples_per_second": 1.526, |
| "train_steps_per_second": 0.048 |
| } |
| ], |
| "logging_steps": 1, |
| "max_steps": 1700, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 10, |
| "save_steps": 200, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 18857631034368.0, |
| "train_batch_size": 1, |
| "trial_name": null, |
| "trial_params": null |
| } |