| { |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 0.3, |
| "eval_steps": 100, |
| "global_step": 300, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.001, |
| "grad_norm": 0.4723578691482544, |
| "learning_rate": 5.000000000000001e-07, |
| "loss": 15.0023, |
| "loss/crossentropy": 2.8106061220169067, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15208163857460022, |
| "loss/reg": 14.850236892700195, |
| "step": 1 |
| }, |
| { |
| "epoch": 0.002, |
| "grad_norm": 0.3746773898601532, |
| "learning_rate": 1.0000000000000002e-06, |
| "loss": 14.8245, |
| "loss/crossentropy": 2.88076388835907, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13159187883138657, |
| "loss/reg": 14.692875862121582, |
| "step": 2 |
| }, |
| { |
| "epoch": 0.003, |
| "grad_norm": 0.39371100068092346, |
| "learning_rate": 1.5e-06, |
| "loss": 14.6298, |
| "loss/crossentropy": 2.8186020851135254, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13105076551437378, |
| "loss/reg": 14.4987154006958, |
| "step": 3 |
| }, |
| { |
| "epoch": 0.004, |
| "grad_norm": 0.4312513470649719, |
| "learning_rate": 2.0000000000000003e-06, |
| "loss": 14.4644, |
| "loss/crossentropy": 2.561935782432556, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15662836283445358, |
| "loss/reg": 14.307746887207031, |
| "step": 4 |
| }, |
| { |
| "epoch": 0.005, |
| "grad_norm": 0.5712147951126099, |
| "learning_rate": 2.5e-06, |
| "loss": 14.2884, |
| "loss/crossentropy": 2.6744261980056763, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16902200877666473, |
| "loss/reg": 14.119378089904785, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.006, |
| "grad_norm": 0.4991922080516815, |
| "learning_rate": 3e-06, |
| "loss": 14.0796, |
| "loss/crossentropy": 2.541142702102661, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14546513557434082, |
| "loss/reg": 13.934137344360352, |
| "step": 6 |
| }, |
| { |
| "epoch": 0.007, |
| "grad_norm": 0.394466370344162, |
| "learning_rate": 3.5000000000000004e-06, |
| "loss": 13.8978, |
| "loss/crossentropy": 2.863157868385315, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14624819159507751, |
| "loss/reg": 13.751592636108398, |
| "step": 7 |
| }, |
| { |
| "epoch": 0.008, |
| "grad_norm": 0.5288362503051758, |
| "learning_rate": 4.000000000000001e-06, |
| "loss": 13.7514, |
| "loss/crossentropy": 2.53192400932312, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.17936921864748, |
| "loss/reg": 13.571989059448242, |
| "step": 8 |
| }, |
| { |
| "epoch": 0.009, |
| "grad_norm": 0.3661612570285797, |
| "learning_rate": 4.5e-06, |
| "loss": 13.5249, |
| "loss/crossentropy": 2.6582940816879272, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13002116978168488, |
| "loss/reg": 13.39488697052002, |
| "step": 9 |
| }, |
| { |
| "epoch": 0.01, |
| "grad_norm": 0.37344738841056824, |
| "learning_rate": 5e-06, |
| "loss": 13.3611, |
| "loss/crossentropy": 2.8080869913101196, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1404452547430992, |
| "loss/reg": 13.220650672912598, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.011, |
| "grad_norm": 0.47446200251579285, |
| "learning_rate": 5.500000000000001e-06, |
| "loss": 13.2185, |
| "loss/crossentropy": 2.7942490577697754, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16937557607889175, |
| "loss/reg": 13.049126625061035, |
| "step": 11 |
| }, |
| { |
| "epoch": 0.012, |
| "grad_norm": 0.41823914647102356, |
| "learning_rate": 6e-06, |
| "loss": 13.0431, |
| "loss/crossentropy": 2.7883923053741455, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16317125409841537, |
| "loss/reg": 12.879945755004883, |
| "step": 12 |
| }, |
| { |
| "epoch": 0.013, |
| "grad_norm": 0.3810077905654907, |
| "learning_rate": 6.5000000000000004e-06, |
| "loss": 12.8458, |
| "loss/crossentropy": 2.5887070894241333, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13234061002731323, |
| "loss/reg": 12.713478088378906, |
| "step": 13 |
| }, |
| { |
| "epoch": 0.014, |
| "grad_norm": 0.861165463924408, |
| "learning_rate": 7.000000000000001e-06, |
| "loss": 12.7646, |
| "loss/crossentropy": 3.0489625930786133, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2151956632733345, |
| "loss/reg": 12.549367904663086, |
| "step": 14 |
| }, |
| { |
| "epoch": 0.015, |
| "grad_norm": 0.3999025225639343, |
| "learning_rate": 7.5e-06, |
| "loss": 12.5428, |
| "loss/crossentropy": 2.6218740940093994, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15504448860883713, |
| "loss/reg": 12.38780403137207, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.016, |
| "grad_norm": 0.3485129177570343, |
| "grad_norm_var": 0.015782494207215392, |
| "learning_rate": 8.000000000000001e-06, |
| "loss": 12.3624, |
| "loss/crossentropy": 2.704634428024292, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1335841417312622, |
| "loss/reg": 12.228797912597656, |
| "step": 16 |
| }, |
| { |
| "epoch": 0.017, |
| "grad_norm": 0.394564151763916, |
| "grad_norm_var": 0.015986270113443057, |
| "learning_rate": 8.500000000000002e-06, |
| "loss": 12.2458, |
| "loss/crossentropy": 2.6254637241363525, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.17384596914052963, |
| "loss/reg": 12.071925163269043, |
| "step": 17 |
| }, |
| { |
| "epoch": 0.018, |
| "grad_norm": 0.359417200088501, |
| "grad_norm_var": 0.016155457953818328, |
| "learning_rate": 9e-06, |
| "loss": 12.0449, |
| "loss/crossentropy": 2.6955989599227905, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.12739497795701027, |
| "loss/reg": 11.917520523071289, |
| "step": 18 |
| }, |
| { |
| "epoch": 0.019, |
| "grad_norm": 0.4362143278121948, |
| "grad_norm_var": 0.01595094581098607, |
| "learning_rate": 9.5e-06, |
| "loss": 11.9337, |
| "loss/crossentropy": 2.6590176820755005, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16834121942520142, |
| "loss/reg": 11.765388488769531, |
| "step": 19 |
| }, |
| { |
| "epoch": 0.02, |
| "grad_norm": 0.3776922821998596, |
| "grad_norm_var": 0.01628110467363616, |
| "learning_rate": 1e-05, |
| "loss": 11.7405, |
| "loss/crossentropy": 2.9381775856018066, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.12479494512081146, |
| "loss/reg": 11.615675926208496, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.021, |
| "grad_norm": 0.3638933598995209, |
| "grad_norm_var": 0.01558998159507059, |
| "learning_rate": 1.05e-05, |
| "loss": 11.5966, |
| "loss/crossentropy": 2.737419009208679, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.12864574417471886, |
| "loss/reg": 11.467905044555664, |
| "step": 21 |
| }, |
| { |
| "epoch": 0.022, |
| "grad_norm": 0.3599083125591278, |
| "grad_norm_var": 0.01563029096619477, |
| "learning_rate": 1.1000000000000001e-05, |
| "loss": 11.4504, |
| "loss/crossentropy": 2.680889368057251, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1280709058046341, |
| "loss/reg": 11.322328567504883, |
| "step": 22 |
| }, |
| { |
| "epoch": 0.023, |
| "grad_norm": 0.5118765830993652, |
| "grad_norm_var": 0.015976795681526535, |
| "learning_rate": 1.1500000000000002e-05, |
| "loss": 11.3452, |
| "loss/crossentropy": 2.6692968606948853, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16634425520896912, |
| "loss/reg": 11.178874969482422, |
| "step": 23 |
| }, |
| { |
| "epoch": 0.024, |
| "grad_norm": 0.3334847688674927, |
| "grad_norm_var": 0.015910143486327633, |
| "learning_rate": 1.2e-05, |
| "loss": 11.1616, |
| "loss/crossentropy": 2.7403935194015503, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.12420090287923813, |
| "loss/reg": 11.037442207336426, |
| "step": 24 |
| }, |
| { |
| "epoch": 0.025, |
| "grad_norm": 0.3828985095024109, |
| "grad_norm_var": 0.015801931574252005, |
| "learning_rate": 1.25e-05, |
| "loss": 11.0467, |
| "loss/crossentropy": 2.796410322189331, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14872785657644272, |
| "loss/reg": 10.897927284240723, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.026, |
| "grad_norm": 0.434479683637619, |
| "grad_norm_var": 0.015627081016672508, |
| "learning_rate": 1.3000000000000001e-05, |
| "loss": 10.9099, |
| "loss/crossentropy": 2.726790428161621, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14949394017457962, |
| "loss/reg": 10.760404586791992, |
| "step": 26 |
| }, |
| { |
| "epoch": 0.027, |
| "grad_norm": 0.3825664520263672, |
| "grad_norm_var": 0.015577720555919354, |
| "learning_rate": 1.3500000000000001e-05, |
| "loss": 10.761, |
| "loss/crossentropy": 2.6250252723693848, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13614310324192047, |
| "loss/reg": 10.624829292297363, |
| "step": 27 |
| }, |
| { |
| "epoch": 0.028, |
| "grad_norm": 0.3759184777736664, |
| "grad_norm_var": 0.015708703781819332, |
| "learning_rate": 1.4000000000000001e-05, |
| "loss": 10.6382, |
| "loss/crossentropy": 2.835649013519287, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1468915417790413, |
| "loss/reg": 10.491329193115234, |
| "step": 28 |
| }, |
| { |
| "epoch": 0.029, |
| "grad_norm": 0.38468286395072937, |
| "grad_norm_var": 0.015690946589658594, |
| "learning_rate": 1.45e-05, |
| "loss": 10.4996, |
| "loss/crossentropy": 2.5807005167007446, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14014270156621933, |
| "loss/reg": 10.359500885009766, |
| "step": 29 |
| }, |
| { |
| "epoch": 0.03, |
| "grad_norm": 0.4535682201385498, |
| "grad_norm_var": 0.002055153692638223, |
| "learning_rate": 1.5e-05, |
| "loss": 10.3735, |
| "loss/crossentropy": 2.547928214073181, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14383359998464584, |
| "loss/reg": 10.229703903198242, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.031, |
| "grad_norm": 0.38543379306793213, |
| "grad_norm_var": 0.0020563179121854346, |
| "learning_rate": 1.55e-05, |
| "loss": 10.2439, |
| "loss/crossentropy": 2.79032039642334, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1421559825539589, |
| "loss/reg": 10.101773262023926, |
| "step": 31 |
| }, |
| { |
| "epoch": 0.032, |
| "grad_norm": 0.33202216029167175, |
| "grad_norm_var": 0.0021707343468757136, |
| "learning_rate": 1.6000000000000003e-05, |
| "loss": 10.0965, |
| "loss/crossentropy": 2.7922052145004272, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.12068676576018333, |
| "loss/reg": 9.97581672668457, |
| "step": 32 |
| }, |
| { |
| "epoch": 0.033, |
| "grad_norm": 0.39245933294296265, |
| "grad_norm_var": 0.0021702323626646702, |
| "learning_rate": 1.65e-05, |
| "loss": 9.9865, |
| "loss/crossentropy": 2.733828544616699, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1354268342256546, |
| "loss/reg": 9.851117134094238, |
| "step": 33 |
| }, |
| { |
| "epoch": 0.034, |
| "grad_norm": 0.38694247603416443, |
| "grad_norm_var": 0.0020992626690788385, |
| "learning_rate": 1.7000000000000003e-05, |
| "loss": 9.8885, |
| "loss/crossentropy": 2.777322292327881, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1599966138601303, |
| "loss/reg": 9.7284574508667, |
| "step": 34 |
| }, |
| { |
| "epoch": 0.035, |
| "grad_norm": 0.35561245679855347, |
| "grad_norm_var": 0.0020449413010129036, |
| "learning_rate": 1.75e-05, |
| "loss": 9.7496, |
| "loss/crossentropy": 2.7148683071136475, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14201582968235016, |
| "loss/reg": 9.607542991638184, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.036, |
| "grad_norm": 0.4006306231021881, |
| "grad_norm_var": 0.002045261355702799, |
| "learning_rate": 1.8e-05, |
| "loss": 9.635, |
| "loss/crossentropy": 2.637976288795471, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14642605930566788, |
| "loss/reg": 9.488553047180176, |
| "step": 36 |
| }, |
| { |
| "epoch": 0.037, |
| "grad_norm": 0.41898205876350403, |
| "grad_norm_var": 0.002044839434195215, |
| "learning_rate": 1.85e-05, |
| "loss": 9.5202, |
| "loss/crossentropy": 2.777597665786743, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14908844977617264, |
| "loss/reg": 9.371098518371582, |
| "step": 37 |
| }, |
| { |
| "epoch": 0.038, |
| "grad_norm": 0.3961292803287506, |
| "grad_norm_var": 0.001965975366123729, |
| "learning_rate": 1.9e-05, |
| "loss": 9.4067, |
| "loss/crossentropy": 2.9033197164535522, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15159177035093307, |
| "loss/reg": 9.255077362060547, |
| "step": 38 |
| }, |
| { |
| "epoch": 0.039, |
| "grad_norm": 0.38804033398628235, |
| "grad_norm_var": 0.0010025647229673696, |
| "learning_rate": 1.9500000000000003e-05, |
| "loss": 9.2802, |
| "loss/crossentropy": 2.889930486679077, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1394604668021202, |
| "loss/reg": 9.14071273803711, |
| "step": 39 |
| }, |
| { |
| "epoch": 0.04, |
| "grad_norm": 0.42919155955314636, |
| "grad_norm_var": 0.0008826965462539841, |
| "learning_rate": 2e-05, |
| "loss": 9.1743, |
| "loss/crossentropy": 2.4006484746932983, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14666535705327988, |
| "loss/reg": 9.027677536010742, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.041, |
| "grad_norm": 0.37885668873786926, |
| "grad_norm_var": 0.0008895506586849048, |
| "learning_rate": 2.05e-05, |
| "loss": 9.0467, |
| "loss/crossentropy": 2.7493255138397217, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13013121858239174, |
| "loss/reg": 8.916520118713379, |
| "step": 41 |
| }, |
| { |
| "epoch": 0.042, |
| "grad_norm": 0.5932357907295227, |
| "grad_norm_var": 0.0033328458836597336, |
| "learning_rate": 2.1e-05, |
| "loss": 8.97, |
| "loss/crossentropy": 2.6117889881134033, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16315819323062897, |
| "loss/reg": 8.80688190460205, |
| "step": 42 |
| }, |
| { |
| "epoch": 0.043, |
| "grad_norm": 0.3747418224811554, |
| "grad_norm_var": 0.0033583994321603233, |
| "learning_rate": 2.15e-05, |
| "loss": 8.8396, |
| "loss/crossentropy": 2.663694739341736, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14052008837461472, |
| "loss/reg": 8.69911003112793, |
| "step": 43 |
| }, |
| { |
| "epoch": 0.044, |
| "grad_norm": 0.4256257116794586, |
| "grad_norm_var": 0.0033339815653853837, |
| "learning_rate": 2.2000000000000003e-05, |
| "loss": 8.726, |
| "loss/crossentropy": 2.7881768941879272, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13326141238212585, |
| "loss/reg": 8.592697143554688, |
| "step": 44 |
| }, |
| { |
| "epoch": 0.045, |
| "grad_norm": 0.4122328460216522, |
| "grad_norm_var": 0.0033030786394142473, |
| "learning_rate": 2.25e-05, |
| "loss": 8.6263, |
| "loss/crossentropy": 2.7951114177703857, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13837838172912598, |
| "loss/reg": 8.487950325012207, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.046, |
| "grad_norm": 0.4931789040565491, |
| "grad_norm_var": 0.0036432243285688614, |
| "learning_rate": 2.3000000000000003e-05, |
| "loss": 8.5405, |
| "loss/crossentropy": 2.80547297000885, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15613804012537003, |
| "loss/reg": 8.384378433227539, |
| "step": 46 |
| }, |
| { |
| "epoch": 0.047, |
| "grad_norm": 0.4260822832584381, |
| "grad_norm_var": 0.003612225968443994, |
| "learning_rate": 2.35e-05, |
| "loss": 8.4325, |
| "loss/crossentropy": 2.7571998834609985, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15075545758008957, |
| "loss/reg": 8.281789779663086, |
| "step": 47 |
| }, |
| { |
| "epoch": 0.048, |
| "grad_norm": 0.38472291827201843, |
| "grad_norm_var": 0.003218571473485099, |
| "learning_rate": 2.4e-05, |
| "loss": 8.3219, |
| "loss/crossentropy": 2.6643882989883423, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14136053621768951, |
| "loss/reg": 8.180567741394043, |
| "step": 48 |
| }, |
| { |
| "epoch": 0.049, |
| "grad_norm": 0.36624062061309814, |
| "grad_norm_var": 0.0033439747229166835, |
| "learning_rate": 2.45e-05, |
| "loss": 8.2178, |
| "loss/crossentropy": 2.737678050994873, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13707132637500763, |
| "loss/reg": 8.080729484558105, |
| "step": 49 |
| }, |
| { |
| "epoch": 0.05, |
| "grad_norm": 0.3760506212711334, |
| "grad_norm_var": 0.0033912685784647087, |
| "learning_rate": 2.5e-05, |
| "loss": 8.137, |
| "loss/crossentropy": 2.7842084169387817, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15427638590335846, |
| "loss/reg": 7.982727527618408, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.051, |
| "grad_norm": 0.4842914044857025, |
| "grad_norm_var": 0.0034291612008964874, |
| "learning_rate": 2.5500000000000003e-05, |
| "loss": 8.0328, |
| "loss/crossentropy": 2.4711248874664307, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14703373610973358, |
| "loss/reg": 7.885744094848633, |
| "step": 51 |
| }, |
| { |
| "epoch": 0.052, |
| "grad_norm": 0.35167771577835083, |
| "grad_norm_var": 0.0037168779577402794, |
| "learning_rate": 2.6000000000000002e-05, |
| "loss": 7.9218, |
| "loss/crossentropy": 2.688042402267456, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13181188702583313, |
| "loss/reg": 7.789944648742676, |
| "step": 52 |
| }, |
| { |
| "epoch": 0.053, |
| "grad_norm": 0.4046782851219177, |
| "grad_norm_var": 0.0037291369976557537, |
| "learning_rate": 2.6500000000000004e-05, |
| "loss": 7.8506, |
| "loss/crossentropy": 2.6470447778701782, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1550150215625763, |
| "loss/reg": 7.695549011230469, |
| "step": 53 |
| }, |
| { |
| "epoch": 0.054, |
| "grad_norm": 0.36790555715560913, |
| "grad_norm_var": 0.003860515189158183, |
| "learning_rate": 2.7000000000000002e-05, |
| "loss": 7.7397, |
| "loss/crossentropy": 2.7709513902664185, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13687237352132797, |
| "loss/reg": 7.6028337478637695, |
| "step": 54 |
| }, |
| { |
| "epoch": 0.055, |
| "grad_norm": 0.378646582365036, |
| "grad_norm_var": 0.003901108788218351, |
| "learning_rate": 2.7500000000000004e-05, |
| "loss": 7.6525, |
| "loss/crossentropy": 2.8750112056732178, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14087973535060883, |
| "loss/reg": 7.511648178100586, |
| "step": 55 |
| }, |
| { |
| "epoch": 0.056, |
| "grad_norm": 0.43194806575775146, |
| "grad_norm_var": 0.0039066305166497416, |
| "learning_rate": 2.8000000000000003e-05, |
| "loss": 7.5885, |
| "loss/crossentropy": 2.6936086416244507, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16746176779270172, |
| "loss/reg": 7.421041965484619, |
| "step": 56 |
| }, |
| { |
| "epoch": 0.057, |
| "grad_norm": 0.4012855291366577, |
| "grad_norm_var": 0.0038280935965925374, |
| "learning_rate": 2.8499999999999998e-05, |
| "loss": 7.4798, |
| "loss/crossentropy": 2.8553980588912964, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14783543348312378, |
| "loss/reg": 7.331946849822998, |
| "step": 57 |
| }, |
| { |
| "epoch": 0.058, |
| "grad_norm": 0.41179144382476807, |
| "grad_norm_var": 0.0016229469351226081, |
| "learning_rate": 2.9e-05, |
| "loss": 7.3917, |
| "loss/crossentropy": 2.90153706073761, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1475769728422165, |
| "loss/reg": 7.24411153793335, |
| "step": 58 |
| }, |
| { |
| "epoch": 0.059, |
| "grad_norm": 0.5224902033805847, |
| "grad_norm_var": 0.0023775492652975813, |
| "learning_rate": 2.95e-05, |
| "loss": 7.3131, |
| "loss/crossentropy": 2.922032952308655, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15628121048212051, |
| "loss/reg": 7.156867027282715, |
| "step": 59 |
| }, |
| { |
| "epoch": 0.06, |
| "grad_norm": 0.511249840259552, |
| "grad_norm_var": 0.0029578979489786493, |
| "learning_rate": 3e-05, |
| "loss": 7.2356, |
| "loss/crossentropy": 2.7479801177978516, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1649666205048561, |
| "loss/reg": 7.070590019226074, |
| "step": 60 |
| }, |
| { |
| "epoch": 0.061, |
| "grad_norm": 0.4054379165172577, |
| "grad_norm_var": 0.0029680738800097915, |
| "learning_rate": 3.05e-05, |
| "loss": 7.138, |
| "loss/crossentropy": 2.8455255031585693, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15279172360897064, |
| "loss/reg": 6.985229015350342, |
| "step": 61 |
| }, |
| { |
| "epoch": 0.062, |
| "grad_norm": 0.3967909812927246, |
| "grad_norm_var": 0.002606398157824093, |
| "learning_rate": 3.1e-05, |
| "loss": 7.045, |
| "loss/crossentropy": 2.5253665447235107, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14376042783260345, |
| "loss/reg": 6.901230335235596, |
| "step": 62 |
| }, |
| { |
| "epoch": 0.063, |
| "grad_norm": 0.5105597376823425, |
| "grad_norm_var": 0.0031904242194133975, |
| "learning_rate": 3.15e-05, |
| "loss": 7.0051, |
| "loss/crossentropy": 2.6825212240219116, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.18690404295921326, |
| "loss/reg": 6.818210124969482, |
| "step": 63 |
| }, |
| { |
| "epoch": 0.064, |
| "grad_norm": 0.4659242630004883, |
| "grad_norm_var": 0.0032302192085850684, |
| "learning_rate": 3.2000000000000005e-05, |
| "loss": 6.9076, |
| "loss/crossentropy": 2.6204874515533447, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.17028357833623886, |
| "loss/reg": 6.737268447875977, |
| "step": 64 |
| }, |
| { |
| "epoch": 0.065, |
| "grad_norm": 0.3556163012981415, |
| "grad_norm_var": 0.003319357356775484, |
| "learning_rate": 3.2500000000000004e-05, |
| "loss": 6.79, |
| "loss/crossentropy": 2.6547772884368896, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.13286586478352547, |
| "loss/reg": 6.657088756561279, |
| "step": 65 |
| }, |
| { |
| "epoch": 0.066, |
| "grad_norm": 0.4854835867881775, |
| "grad_norm_var": 0.003375179781292544, |
| "learning_rate": 3.3e-05, |
| "loss": 6.758, |
| "loss/crossentropy": 2.817094564437866, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.18038344383239746, |
| "loss/reg": 6.577615261077881, |
| "step": 66 |
| }, |
| { |
| "epoch": 0.067, |
| "grad_norm": 0.4216950833797455, |
| "grad_norm_var": 0.0031699615767635542, |
| "learning_rate": 3.35e-05, |
| "loss": 6.6588, |
| "loss/crossentropy": 2.872538924217224, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15885479748249054, |
| "loss/reg": 6.499909400939941, |
| "step": 67 |
| }, |
| { |
| "epoch": 0.068, |
| "grad_norm": 0.4400080442428589, |
| "grad_norm_var": 0.0027769945370051136, |
| "learning_rate": 3.4000000000000007e-05, |
| "loss": 6.5852, |
| "loss/crossentropy": 2.6969038248062134, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16181904822587967, |
| "loss/reg": 6.423398971557617, |
| "step": 68 |
| }, |
| { |
| "epoch": 0.069, |
| "grad_norm": 0.4272904396057129, |
| "grad_norm_var": 0.0027266697361371774, |
| "learning_rate": 3.45e-05, |
| "loss": 6.5089, |
| "loss/crossentropy": 2.7056760787963867, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16134114563465118, |
| "loss/reg": 6.347527503967285, |
| "step": 69 |
| }, |
| { |
| "epoch": 0.07, |
| "grad_norm": 0.4215809404850006, |
| "grad_norm_var": 0.0024381335593683163, |
| "learning_rate": 3.5e-05, |
| "loss": 6.4248, |
| "loss/crossentropy": 2.822075366973877, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15255100280046463, |
| "loss/reg": 6.2722673416137695, |
| "step": 70 |
| }, |
| { |
| "epoch": 0.071, |
| "grad_norm": 0.3756145238876343, |
| "grad_norm_var": 0.0024621927937988008, |
| "learning_rate": 3.55e-05, |
| "loss": 6.3343, |
| "loss/crossentropy": 2.7529423236846924, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1365201622247696, |
| "loss/reg": 6.1977925300598145, |
| "step": 71 |
| }, |
| { |
| "epoch": 0.072, |
| "grad_norm": 0.4465688467025757, |
| "grad_norm_var": 0.002466586095849488, |
| "learning_rate": 3.6e-05, |
| "loss": 6.2908, |
| "loss/crossentropy": 2.8122498989105225, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16586530208587646, |
| "loss/reg": 6.124953746795654, |
| "step": 72 |
| }, |
| { |
| "epoch": 0.073, |
| "grad_norm": 0.5681706070899963, |
| "grad_norm_var": 0.0034022813413523423, |
| "learning_rate": 3.65e-05, |
| "loss": 6.2178, |
| "loss/crossentropy": 2.8980711698532104, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16514715552330017, |
| "loss/reg": 6.052603244781494, |
| "step": 73 |
| }, |
| { |
| "epoch": 0.074, |
| "grad_norm": 0.48894357681274414, |
| "grad_norm_var": 0.0034029444248247385, |
| "learning_rate": 3.7e-05, |
| "loss": 6.1569, |
| "loss/crossentropy": 2.7240917682647705, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.17589756846427917, |
| "loss/reg": 5.980965614318848, |
| "step": 74 |
| }, |
| { |
| "epoch": 0.075, |
| "grad_norm": 0.4453263580799103, |
| "grad_norm_var": 0.003057192832421729, |
| "learning_rate": 3.7500000000000003e-05, |
| "loss": 6.0806, |
| "loss/crossentropy": 2.880316376686096, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.17018750309944153, |
| "loss/reg": 5.910387992858887, |
| "step": 75 |
| }, |
| { |
| "epoch": 0.076, |
| "grad_norm": 0.4072200357913971, |
| "grad_norm_var": 0.002854757019651292, |
| "learning_rate": 3.8e-05, |
| "loss": 5.9831, |
| "loss/crossentropy": 2.8352543115615845, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14264734089374542, |
| "loss/reg": 5.840408802032471, |
| "step": 76 |
| }, |
| { |
| "epoch": 0.077, |
| "grad_norm": 0.47089695930480957, |
| "grad_norm_var": 0.002808781993245483, |
| "learning_rate": 3.85e-05, |
| "loss": 5.9645, |
| "loss/crossentropy": 2.9140390157699585, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.19280433654785156, |
| "loss/reg": 5.771743297576904, |
| "step": 77 |
| }, |
| { |
| "epoch": 0.078, |
| "grad_norm": 0.3908418118953705, |
| "grad_norm_var": 0.0028496157710373726, |
| "learning_rate": 3.9000000000000006e-05, |
| "loss": 5.8542, |
| "loss/crossentropy": 2.70191752910614, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15121394395828247, |
| "loss/reg": 5.7030134201049805, |
| "step": 78 |
| }, |
| { |
| "epoch": 0.079, |
| "grad_norm": 0.417324423789978, |
| "grad_norm_var": 0.002579272338799754, |
| "learning_rate": 3.9500000000000005e-05, |
| "loss": 5.7921, |
| "loss/crossentropy": 2.8772886991500854, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15610723197460175, |
| "loss/reg": 5.6359782218933105, |
| "step": 79 |
| }, |
| { |
| "epoch": 0.08, |
| "grad_norm": 0.4123746156692505, |
| "grad_norm_var": 0.002568267863573048, |
| "learning_rate": 4e-05, |
| "loss": 5.7333, |
| "loss/crossentropy": 2.785880208015442, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16348882019519806, |
| "loss/reg": 5.569836616516113, |
| "step": 80 |
| }, |
| { |
| "epoch": 0.081, |
| "grad_norm": 0.4967520833015442, |
| "grad_norm_var": 0.002301783549342551, |
| "learning_rate": 4.05e-05, |
| "loss": 5.6758, |
| "loss/crossentropy": 2.7999967336654663, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.17136523872613907, |
| "loss/reg": 5.504480361938477, |
| "step": 81 |
| }, |
| { |
| "epoch": 0.082, |
| "grad_norm": 0.5052318572998047, |
| "grad_norm_var": 0.002433398774934281, |
| "learning_rate": 4.1e-05, |
| "loss": 5.6235, |
| "loss/crossentropy": 2.745284676551819, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.183761365711689, |
| "loss/reg": 5.439704418182373, |
| "step": 82 |
| }, |
| { |
| "epoch": 0.083, |
| "grad_norm": 0.42788586020469666, |
| "grad_norm_var": 0.0024157402006901862, |
| "learning_rate": 4.15e-05, |
| "loss": 5.5482, |
| "loss/crossentropy": 2.996077299118042, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1716163083910942, |
| "loss/reg": 5.37656831741333, |
| "step": 83 |
| }, |
| { |
| "epoch": 0.084, |
| "grad_norm": 0.4045878052711487, |
| "grad_norm_var": 0.002524230641886934, |
| "learning_rate": 4.2e-05, |
| "loss": 5.4639, |
| "loss/crossentropy": 2.611970067024231, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.14972157776355743, |
| "loss/reg": 5.314174175262451, |
| "step": 84 |
| }, |
| { |
| "epoch": 0.085, |
| "grad_norm": 0.41384124755859375, |
| "grad_norm_var": 0.0025657923048212758, |
| "learning_rate": 4.25e-05, |
| "loss": 5.4087, |
| "loss/crossentropy": 2.7544474601745605, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.15563561022281647, |
| "loss/reg": 5.253114223480225, |
| "step": 85 |
| }, |
| { |
| "epoch": 0.086, |
| "grad_norm": 0.4230954945087433, |
| "grad_norm_var": 0.002561545150143992, |
| "learning_rate": 4.3e-05, |
| "loss": 5.3742, |
| "loss/crossentropy": 2.8086984157562256, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.18146374821662903, |
| "loss/reg": 5.192752361297607, |
| "step": 86 |
| }, |
| { |
| "epoch": 0.087, |
| "grad_norm": 0.45918789505958557, |
| "grad_norm_var": 0.0022425431957913728, |
| "learning_rate": 4.35e-05, |
| "loss": 5.3138, |
| "loss/crossentropy": 2.785672187805176, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1815495491027832, |
| "loss/reg": 5.132205486297607, |
| "step": 87 |
| }, |
| { |
| "epoch": 0.088, |
| "grad_norm": 0.4096454083919525, |
| "grad_norm_var": 0.002337951427575909, |
| "learning_rate": 4.4000000000000006e-05, |
| "loss": 5.2231, |
| "loss/crossentropy": 2.7667384147644043, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1498737782239914, |
| "loss/reg": 5.073270797729492, |
| "step": 88 |
| }, |
| { |
| "epoch": 0.089, |
| "grad_norm": 0.4818902313709259, |
| "grad_norm_var": 0.001401593034965017, |
| "learning_rate": 4.4500000000000004e-05, |
| "loss": 5.1976, |
| "loss/crossentropy": 2.722651481628418, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.18228119611740112, |
| "loss/reg": 5.015347480773926, |
| "step": 89 |
| }, |
| { |
| "epoch": 0.09, |
| "grad_norm": 0.4635304808616638, |
| "grad_norm_var": 0.0012793023910875926, |
| "learning_rate": 4.5e-05, |
| "loss": 5.1364, |
| "loss/crossentropy": 2.7336219549179077, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1788763403892517, |
| "loss/reg": 4.95751953125, |
| "step": 90 |
| }, |
| { |
| "epoch": 0.091, |
| "grad_norm": 0.46776527166366577, |
| "grad_norm_var": 0.0013286457514422071, |
| "learning_rate": 4.55e-05, |
| "loss": 5.089, |
| "loss/crossentropy": 2.818411946296692, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.18763671815395355, |
| "loss/reg": 4.901313781738281, |
| "step": 91 |
| }, |
| { |
| "epoch": 0.092, |
| "grad_norm": 0.4979424774646759, |
| "grad_norm_var": 0.0014374124356233827, |
| "learning_rate": 4.600000000000001e-05, |
| "loss": 5.033, |
| "loss/crossentropy": 2.780390739440918, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.18797770142555237, |
| "loss/reg": 4.844989776611328, |
| "step": 92 |
| }, |
| { |
| "epoch": 0.093, |
| "grad_norm": 0.476500004529953, |
| "grad_norm_var": 0.0014576571842103666, |
| "learning_rate": 4.6500000000000005e-05, |
| "loss": 4.9601, |
| "loss/crossentropy": 2.873763084411621, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16978690773248672, |
| "loss/reg": 4.790344715118408, |
| "step": 93 |
| }, |
| { |
| "epoch": 0.094, |
| "grad_norm": 0.4781922996044159, |
| "grad_norm_var": 0.0012831022874166228, |
| "learning_rate": 4.7e-05, |
| "loss": 4.9191, |
| "loss/crossentropy": 2.7947566509246826, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1818721517920494, |
| "loss/reg": 4.737210273742676, |
| "step": 94 |
| }, |
| { |
| "epoch": 0.095, |
| "grad_norm": 0.4668790102005005, |
| "grad_norm_var": 0.0012059221432147993, |
| "learning_rate": 4.75e-05, |
| "loss": 4.8722, |
| "loss/crossentropy": 2.975098729133606, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.18810376524925232, |
| "loss/reg": 4.684102535247803, |
| "step": 95 |
| }, |
| { |
| "epoch": 0.096, |
| "grad_norm": 0.6337518095970154, |
| "grad_norm_var": 0.003000960526301677, |
| "learning_rate": 4.8e-05, |
| "loss": 4.8243, |
| "loss/crossentropy": 2.8563435077667236, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.19358298182487488, |
| "loss/reg": 4.630751132965088, |
| "step": 96 |
| }, |
| { |
| "epoch": 0.097, |
| "grad_norm": 0.5349352955818176, |
| "grad_norm_var": 0.003232518858686584, |
| "learning_rate": 4.85e-05, |
| "loss": 4.7556, |
| "loss/crossentropy": 2.8064088821411133, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1775345504283905, |
| "loss/reg": 4.578096866607666, |
| "step": 97 |
| }, |
| { |
| "epoch": 0.098, |
| "grad_norm": 0.4641897976398468, |
| "grad_norm_var": 0.003153502010794122, |
| "learning_rate": 4.9e-05, |
| "loss": 4.7093, |
| "loss/crossentropy": 2.737215995788574, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.18364109843969345, |
| "loss/reg": 4.525658130645752, |
| "step": 98 |
| }, |
| { |
| "epoch": 0.099, |
| "grad_norm": 0.4986944794654846, |
| "grad_norm_var": 0.003078809549519083, |
| "learning_rate": 4.9500000000000004e-05, |
| "loss": 4.6582, |
| "loss/crossentropy": 2.7446107864379883, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1834591180086136, |
| "loss/reg": 4.474703311920166, |
| "step": 99 |
| }, |
| { |
| "epoch": 0.1, |
| "grad_norm": 0.5496240854263306, |
| "grad_norm_var": 0.0030625509543914593, |
| "learning_rate": 5e-05, |
| "loss": 4.6055, |
| "loss/crossentropy": 2.8407260179519653, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.18116910755634308, |
| "loss/reg": 4.424361228942871, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.101, |
| "grad_norm": 0.4984920024871826, |
| "grad_norm_var": 0.0027357110980922527, |
| "learning_rate": 5e-05, |
| "loss": 4.5352, |
| "loss/crossentropy": 2.6976126432418823, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1613123118877411, |
| "loss/reg": 4.373856544494629, |
| "step": 101 |
| }, |
| { |
| "epoch": 0.102, |
| "grad_norm": 0.46887120604515076, |
| "grad_norm_var": 0.0024719398916868435, |
| "learning_rate": 5e-05, |
| "loss": 4.5057, |
| "loss/crossentropy": 2.7894492149353027, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1814948320388794, |
| "loss/reg": 4.324183464050293, |
| "step": 102 |
| }, |
| { |
| "epoch": 0.103, |
| "grad_norm": 0.4655573070049286, |
| "grad_norm_var": 0.002447772493256354, |
| "learning_rate": 5e-05, |
| "loss": 4.4575, |
| "loss/crossentropy": 2.6779565811157227, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.18209144473075867, |
| "loss/reg": 4.275397777557373, |
| "step": 103 |
| }, |
| { |
| "epoch": 0.104, |
| "grad_norm": 0.4939666986465454, |
| "grad_norm_var": 0.001977171889767393, |
| "learning_rate": 5e-05, |
| "loss": 4.4067, |
| "loss/crossentropy": 2.992090344429016, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.17926523089408875, |
| "loss/reg": 4.227434158325195, |
| "step": 104 |
| }, |
| { |
| "epoch": 0.105, |
| "grad_norm": 0.43281158804893494, |
| "grad_norm_var": 0.0022220042113353564, |
| "learning_rate": 5e-05, |
| "loss": 4.3468, |
| "loss/crossentropy": 2.862754464149475, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16724150627851486, |
| "loss/reg": 4.179599761962891, |
| "step": 105 |
| }, |
| { |
| "epoch": 0.106, |
| "grad_norm": 0.8847590684890747, |
| "grad_norm_var": 0.01164347760726185, |
| "learning_rate": 5e-05, |
| "loss": 4.3643, |
| "loss/crossentropy": 2.874763250350952, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2313619926571846, |
| "loss/reg": 4.132906436920166, |
| "step": 106 |
| }, |
| { |
| "epoch": 0.107, |
| "grad_norm": 0.48385030031204224, |
| "grad_norm_var": 0.011548569190511412, |
| "learning_rate": 5e-05, |
| "loss": 4.2836, |
| "loss/crossentropy": 2.7484434843063354, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1970255747437477, |
| "loss/reg": 4.086555004119873, |
| "step": 107 |
| }, |
| { |
| "epoch": 0.108, |
| "grad_norm": 0.483051061630249, |
| "grad_norm_var": 0.011607343550055186, |
| "learning_rate": 5e-05, |
| "loss": 4.2269, |
| "loss/crossentropy": 2.726169228553772, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1865312084555626, |
| "loss/reg": 4.04041051864624, |
| "step": 108 |
| }, |
| { |
| "epoch": 0.109, |
| "grad_norm": 0.4503677487373352, |
| "grad_norm_var": 0.011800312371701541, |
| "learning_rate": 5e-05, |
| "loss": 4.1623, |
| "loss/crossentropy": 2.7449240684509277, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.16741468757390976, |
| "loss/reg": 3.9949209690093994, |
| "step": 109 |
| }, |
| { |
| "epoch": 0.11, |
| "grad_norm": 0.5844920873641968, |
| "grad_norm_var": 0.011942339024676821, |
| "learning_rate": 5e-05, |
| "loss": 4.1617, |
| "loss/crossentropy": 2.856382727622986, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2112957313656807, |
| "loss/reg": 3.9503557682037354, |
| "step": 110 |
| }, |
| { |
| "epoch": 0.111, |
| "grad_norm": 0.5652735829353333, |
| "grad_norm_var": 0.011789605977028102, |
| "learning_rate": 5e-05, |
| "loss": 4.1027, |
| "loss/crossentropy": 2.9140307903289795, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1970185860991478, |
| "loss/reg": 3.9056639671325684, |
| "step": 111 |
| }, |
| { |
| "epoch": 0.112, |
| "grad_norm": 0.4855252504348755, |
| "grad_norm_var": 0.011127969999493755, |
| "learning_rate": 5e-05, |
| "loss": 4.0417, |
| "loss/crossentropy": 2.6968199014663696, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.17954326421022415, |
| "loss/reg": 3.8621463775634766, |
| "step": 112 |
| }, |
| { |
| "epoch": 0.113, |
| "grad_norm": 0.48234689235687256, |
| "grad_norm_var": 0.011206813099540237, |
| "learning_rate": 5e-05, |
| "loss": 4.0007, |
| "loss/crossentropy": 2.799056887626648, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.18207771331071854, |
| "loss/reg": 3.818636178970337, |
| "step": 113 |
| }, |
| { |
| "epoch": 0.114, |
| "grad_norm": 0.5423383116722107, |
| "grad_norm_var": 0.01102529849710108, |
| "learning_rate": 5e-05, |
| "loss": 3.9723, |
| "loss/crossentropy": 2.8127297163009644, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.19588155299425125, |
| "loss/reg": 3.7764365673065186, |
| "step": 114 |
| }, |
| { |
| "epoch": 0.115, |
| "grad_norm": 0.4785888195037842, |
| "grad_norm_var": 0.011116059207488442, |
| "learning_rate": 5e-05, |
| "loss": 3.9306, |
| "loss/crossentropy": 2.6822171211242676, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.19706681370735168, |
| "loss/reg": 3.733567237854004, |
| "step": 115 |
| }, |
| { |
| "epoch": 0.116, |
| "grad_norm": 0.6865644454956055, |
| "grad_norm_var": 0.012794859074889713, |
| "learning_rate": 5e-05, |
| "loss": 3.8993, |
| "loss/crossentropy": 2.7296390533447266, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.20705384016036987, |
| "loss/reg": 3.69228458404541, |
| "step": 116 |
| }, |
| { |
| "epoch": 0.117, |
| "grad_norm": 0.541416347026825, |
| "grad_norm_var": 0.012727234722074092, |
| "learning_rate": 5e-05, |
| "loss": 3.8506, |
| "loss/crossentropy": 2.43897020816803, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.199504554271698, |
| "loss/reg": 3.6510753631591797, |
| "step": 117 |
| }, |
| { |
| "epoch": 0.118, |
| "grad_norm": 0.5443249940872192, |
| "grad_norm_var": 0.012436776617120237, |
| "learning_rate": 5e-05, |
| "loss": 3.8185, |
| "loss/crossentropy": 2.979251503944397, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2087908461689949, |
| "loss/reg": 3.609746217727661, |
| "step": 118 |
| }, |
| { |
| "epoch": 0.119, |
| "grad_norm": 0.49655717611312866, |
| "grad_norm_var": 0.012198124493338987, |
| "learning_rate": 5e-05, |
| "loss": 3.7735, |
| "loss/crossentropy": 2.741288185119629, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2037513107061386, |
| "loss/reg": 3.5697667598724365, |
| "step": 119 |
| }, |
| { |
| "epoch": 0.12, |
| "grad_norm": 0.544378936290741, |
| "grad_norm_var": 0.012049124650295052, |
| "learning_rate": 5e-05, |
| "loss": 3.7418, |
| "loss/crossentropy": 2.6515822410583496, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2117089480161667, |
| "loss/reg": 3.530067205429077, |
| "step": 120 |
| }, |
| { |
| "epoch": 0.121, |
| "grad_norm": 0.5594172477722168, |
| "grad_norm_var": 0.011192301061983878, |
| "learning_rate": 5e-05, |
| "loss": 3.6918, |
| "loss/crossentropy": 3.024221420288086, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.20095330476760864, |
| "loss/reg": 3.4908294677734375, |
| "step": 121 |
| }, |
| { |
| "epoch": 0.122, |
| "grad_norm": 0.5750042200088501, |
| "grad_norm_var": 0.0033974972164560876, |
| "learning_rate": 5e-05, |
| "loss": 3.654, |
| "loss/crossentropy": 2.953397750854492, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.20131896436214447, |
| "loss/reg": 3.4526772499084473, |
| "step": 122 |
| }, |
| { |
| "epoch": 0.123, |
| "grad_norm": 0.6985020041465759, |
| "grad_norm_var": 0.004914360602863832, |
| "learning_rate": 5e-05, |
| "loss": 3.6063, |
| "loss/crossentropy": 3.0021926164627075, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.19196629524230957, |
| "loss/reg": 3.4143624305725098, |
| "step": 123 |
| }, |
| { |
| "epoch": 0.124, |
| "grad_norm": 0.5525792837142944, |
| "grad_norm_var": 0.0046432755844203035, |
| "learning_rate": 5e-05, |
| "loss": 3.5834, |
| "loss/crossentropy": 2.7415930032730103, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2068946734070778, |
| "loss/reg": 3.376502513885498, |
| "step": 124 |
| }, |
| { |
| "epoch": 0.125, |
| "grad_norm": 0.5579767823219299, |
| "grad_norm_var": 0.0039485466275149, |
| "learning_rate": 5e-05, |
| "loss": 3.5704, |
| "loss/crossentropy": 2.9917668104171753, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.23110263049602509, |
| "loss/reg": 3.3393208980560303, |
| "step": 125 |
| }, |
| { |
| "epoch": 0.126, |
| "grad_norm": 0.5378789305686951, |
| "grad_norm_var": 0.003906987758377944, |
| "learning_rate": 5e-05, |
| "loss": 3.5075, |
| "loss/crossentropy": 2.5526931285858154, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2052694484591484, |
| "loss/reg": 3.3022634983062744, |
| "step": 126 |
| }, |
| { |
| "epoch": 0.127, |
| "grad_norm": 0.47176846861839294, |
| "grad_norm_var": 0.00430094370460804, |
| "learning_rate": 5e-05, |
| "loss": 3.4467, |
| "loss/crossentropy": 2.696569323539734, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.1805185303092003, |
| "loss/reg": 3.2661733627319336, |
| "step": 127 |
| }, |
| { |
| "epoch": 0.128, |
| "grad_norm": 0.5889366269111633, |
| "grad_norm_var": 0.004118957968931118, |
| "learning_rate": 5e-05, |
| "loss": 3.4456, |
| "loss/crossentropy": 2.716612458229065, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.21495617926120758, |
| "loss/reg": 3.2306554317474365, |
| "step": 128 |
| }, |
| { |
| "epoch": 0.129, |
| "grad_norm": 0.5206383466720581, |
| "grad_norm_var": 0.003846500454155773, |
| "learning_rate": 5e-05, |
| "loss": 3.3906, |
| "loss/crossentropy": 2.822697639465332, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.19472619146108627, |
| "loss/reg": 3.195892095565796, |
| "step": 129 |
| }, |
| { |
| "epoch": 0.13, |
| "grad_norm": 0.5264687538146973, |
| "grad_norm_var": 0.003891263139159214, |
| "learning_rate": 5e-05, |
| "loss": 3.3635, |
| "loss/crossentropy": 2.934883236885071, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.20290321856737137, |
| "loss/reg": 3.160593271255493, |
| "step": 130 |
| }, |
| { |
| "epoch": 0.131, |
| "grad_norm": 0.7590413093566895, |
| "grad_norm_var": 0.005947478634549071, |
| "learning_rate": 5e-05, |
| "loss": 3.3966, |
| "loss/crossentropy": 2.7194037437438965, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.27077148854732513, |
| "loss/reg": 3.1258270740509033, |
| "step": 131 |
| }, |
| { |
| "epoch": 0.132, |
| "grad_norm": 0.581623375415802, |
| "grad_norm_var": 0.005041033325442431, |
| "learning_rate": 5e-05, |
| "loss": 3.3275, |
| "loss/crossentropy": 2.6975520849227905, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.23590940982103348, |
| "loss/reg": 3.0916152000427246, |
| "step": 132 |
| }, |
| { |
| "epoch": 0.133, |
| "grad_norm": 0.5531981587409973, |
| "grad_norm_var": 0.00501104000776567, |
| "learning_rate": 5e-05, |
| "loss": 3.2843, |
| "loss/crossentropy": 2.8643434047698975, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.226410910487175, |
| "loss/reg": 3.0579142570495605, |
| "step": 133 |
| }, |
| { |
| "epoch": 0.134, |
| "grad_norm": 0.5661957263946533, |
| "grad_norm_var": 0.004975488363852891, |
| "learning_rate": 5e-05, |
| "loss": 3.234, |
| "loss/crossentropy": 2.69516921043396, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.20884258300065994, |
| "loss/reg": 3.02518630027771, |
| "step": 134 |
| }, |
| { |
| "epoch": 0.135, |
| "grad_norm": 0.6015440225601196, |
| "grad_norm_var": 0.004662409555643191, |
| "learning_rate": 5e-05, |
| "loss": 3.2165, |
| "loss/crossentropy": 2.8518201112747192, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2233269065618515, |
| "loss/reg": 2.993175983428955, |
| "step": 135 |
| }, |
| { |
| "epoch": 0.136, |
| "grad_norm": 0.5407832264900208, |
| "grad_norm_var": 0.004677752959838636, |
| "learning_rate": 5e-05, |
| "loss": 3.1783, |
| "loss/crossentropy": 2.605350375175476, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.21587271988391876, |
| "loss/reg": 2.962451219558716, |
| "step": 136 |
| }, |
| { |
| "epoch": 0.137, |
| "grad_norm": 0.5763463377952576, |
| "grad_norm_var": 0.004661682690455685, |
| "learning_rate": 5e-05, |
| "loss": 3.1416, |
| "loss/crossentropy": 3.1093978881835938, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2114047110080719, |
| "loss/reg": 2.9302432537078857, |
| "step": 137 |
| }, |
| { |
| "epoch": 0.138, |
| "grad_norm": 0.6173213720321655, |
| "grad_norm_var": 0.004770635458124413, |
| "learning_rate": 5e-05, |
| "loss": 3.1261, |
| "loss/crossentropy": 2.718475341796875, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.22572749853134155, |
| "loss/reg": 2.900359630584717, |
| "step": 138 |
| }, |
| { |
| "epoch": 0.139, |
| "grad_norm": 0.6091275811195374, |
| "grad_norm_var": 0.003835986663604714, |
| "learning_rate": 5e-05, |
| "loss": 3.1152, |
| "loss/crossentropy": 2.9986231327056885, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24629146605730057, |
| "loss/reg": 2.868884801864624, |
| "step": 139 |
| }, |
| { |
| "epoch": 0.14, |
| "grad_norm": 0.6635368466377258, |
| "grad_norm_var": 0.004309425902894992, |
| "learning_rate": 5e-05, |
| "loss": 3.0839, |
| "loss/crossentropy": 2.8149209022521973, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24507686495780945, |
| "loss/reg": 2.8388445377349854, |
| "step": 140 |
| }, |
| { |
| "epoch": 0.141, |
| "grad_norm": 0.5964515209197998, |
| "grad_norm_var": 0.0042914079234850615, |
| "learning_rate": 5e-05, |
| "loss": 3.0377, |
| "loss/crossentropy": 2.7409075498580933, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.22913794964551926, |
| "loss/reg": 2.8085222244262695, |
| "step": 141 |
| }, |
| { |
| "epoch": 0.142, |
| "grad_norm": 0.5034841895103455, |
| "grad_norm_var": 0.004567356435965956, |
| "learning_rate": 5e-05, |
| "loss": 2.9696, |
| "loss/crossentropy": 2.737534761428833, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.19174715876579285, |
| "loss/reg": 2.7779009342193604, |
| "step": 142 |
| }, |
| { |
| "epoch": 0.143, |
| "grad_norm": 0.6026555895805359, |
| "grad_norm_var": 0.0037531109745808865, |
| "learning_rate": 5e-05, |
| "loss": 2.969, |
| "loss/crossentropy": 2.80273699760437, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.22040193527936935, |
| "loss/reg": 2.748551607131958, |
| "step": 143 |
| }, |
| { |
| "epoch": 0.144, |
| "grad_norm": 0.666439414024353, |
| "grad_norm_var": 0.004138625305930965, |
| "learning_rate": 5e-05, |
| "loss": 2.9522, |
| "loss/crossentropy": 3.0744409561157227, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.232220858335495, |
| "loss/reg": 2.719933032989502, |
| "step": 144 |
| }, |
| { |
| "epoch": 0.145, |
| "grad_norm": 0.6524785161018372, |
| "grad_norm_var": 0.003956421144101782, |
| "learning_rate": 5e-05, |
| "loss": 2.9164, |
| "loss/crossentropy": 2.7517281770706177, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.22563594579696655, |
| "loss/reg": 2.6907875537872314, |
| "step": 145 |
| }, |
| { |
| "epoch": 0.146, |
| "grad_norm": 0.5523445010185242, |
| "grad_norm_var": 0.003740977996257063, |
| "learning_rate": 5e-05, |
| "loss": 2.8726, |
| "loss/crossentropy": 2.757808804512024, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.20937243849039078, |
| "loss/reg": 2.663196086883545, |
| "step": 146 |
| }, |
| { |
| "epoch": 0.147, |
| "grad_norm": 0.6249282360076904, |
| "grad_norm_var": 0.002068765434605761, |
| "learning_rate": 5e-05, |
| "loss": 2.8454, |
| "loss/crossentropy": 2.5913329124450684, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.20920735597610474, |
| "loss/reg": 2.6361756324768066, |
| "step": 147 |
| }, |
| { |
| "epoch": 0.148, |
| "grad_norm": 0.60258549451828, |
| "grad_norm_var": 0.002060857699743638, |
| "learning_rate": 5e-05, |
| "loss": 2.8384, |
| "loss/crossentropy": 2.7472609281539917, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.22973284125328064, |
| "loss/reg": 2.6086883544921875, |
| "step": 148 |
| }, |
| { |
| "epoch": 0.149, |
| "grad_norm": 0.6428982019424438, |
| "grad_norm_var": 0.0020567465452488035, |
| "learning_rate": 5e-05, |
| "loss": 2.8164, |
| "loss/crossentropy": 2.794912338256836, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2351183295249939, |
| "loss/reg": 2.58123517036438, |
| "step": 149 |
| }, |
| { |
| "epoch": 0.15, |
| "grad_norm": 0.5828113555908203, |
| "grad_norm_var": 0.0019964633899114256, |
| "learning_rate": 5e-05, |
| "loss": 2.77, |
| "loss/crossentropy": 2.8268847465515137, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2156360223889351, |
| "loss/reg": 2.554368019104004, |
| "step": 150 |
| }, |
| { |
| "epoch": 0.151, |
| "grad_norm": 0.5313657522201538, |
| "grad_norm_var": 0.0023107269902267634, |
| "learning_rate": 5e-05, |
| "loss": 2.7369, |
| "loss/crossentropy": 2.7121708393096924, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.20960784703493118, |
| "loss/reg": 2.5272669792175293, |
| "step": 151 |
| }, |
| { |
| "epoch": 0.152, |
| "grad_norm": 0.6290384531021118, |
| "grad_norm_var": 0.0021260438734969967, |
| "learning_rate": 5e-05, |
| "loss": 2.7525, |
| "loss/crossentropy": 2.6518945693969727, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2527272552251816, |
| "loss/reg": 2.4997715950012207, |
| "step": 152 |
| }, |
| { |
| "epoch": 0.153, |
| "grad_norm": 0.6068312525749207, |
| "grad_norm_var": 0.0020743122187930397, |
| "learning_rate": 5e-05, |
| "loss": 2.6979, |
| "loss/crossentropy": 2.895982503890991, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.22569575160741806, |
| "loss/reg": 2.4722533226013184, |
| "step": 153 |
| }, |
| { |
| "epoch": 0.154, |
| "grad_norm": 0.6289188861846924, |
| "grad_norm_var": 0.002101356175176683, |
| "learning_rate": 5e-05, |
| "loss": 2.6976, |
| "loss/crossentropy": 2.899941921234131, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2522962614893913, |
| "loss/reg": 2.4453046321868896, |
| "step": 154 |
| }, |
| { |
| "epoch": 0.155, |
| "grad_norm": 0.5613990426063538, |
| "grad_norm_var": 0.0022237872473363394, |
| "learning_rate": 5e-05, |
| "loss": 2.6154, |
| "loss/crossentropy": 2.797275185585022, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.19633204489946365, |
| "loss/reg": 2.4190335273742676, |
| "step": 155 |
| }, |
| { |
| "epoch": 0.156, |
| "grad_norm": 0.6884348392486572, |
| "grad_norm_var": 0.0024634630505220743, |
| "learning_rate": 5e-05, |
| "loss": 2.6111, |
| "loss/crossentropy": 2.9471700191497803, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.21896883845329285, |
| "loss/reg": 2.392162561416626, |
| "step": 156 |
| }, |
| { |
| "epoch": 0.157, |
| "grad_norm": 0.7496616840362549, |
| "grad_norm_var": 0.003764773121766973, |
| "learning_rate": 5e-05, |
| "loss": 2.6336, |
| "loss/crossentropy": 2.9294755458831787, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2678024023771286, |
| "loss/reg": 2.365837335586548, |
| "step": 157 |
| }, |
| { |
| "epoch": 0.158, |
| "grad_norm": 0.5821413993835449, |
| "grad_norm_var": 0.002990917729452214, |
| "learning_rate": 5e-05, |
| "loss": 2.5575, |
| "loss/crossentropy": 2.6858139038085938, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.21864495426416397, |
| "loss/reg": 2.338852643966675, |
| "step": 158 |
| }, |
| { |
| "epoch": 0.159, |
| "grad_norm": 0.57896488904953, |
| "grad_norm_var": 0.0030778080010032127, |
| "learning_rate": 5e-05, |
| "loss": 2.5235, |
| "loss/crossentropy": 2.811260938644409, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2114136442542076, |
| "loss/reg": 2.312105417251587, |
| "step": 159 |
| }, |
| { |
| "epoch": 0.16, |
| "grad_norm": 0.6234793066978455, |
| "grad_norm_var": 0.0029132751210981284, |
| "learning_rate": 5e-05, |
| "loss": 2.5127, |
| "loss/crossentropy": 2.6081695556640625, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.22646665573120117, |
| "loss/reg": 2.2862415313720703, |
| "step": 160 |
| }, |
| { |
| "epoch": 0.161, |
| "grad_norm": 0.5921000838279724, |
| "grad_norm_var": 0.002838538594336256, |
| "learning_rate": 5e-05, |
| "loss": 2.4922, |
| "loss/crossentropy": 2.8606789112091064, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2310907021164894, |
| "loss/reg": 2.26111102104187, |
| "step": 161 |
| }, |
| { |
| "epoch": 0.162, |
| "grad_norm": 0.5616194605827332, |
| "grad_norm_var": 0.0027712310502317522, |
| "learning_rate": 5e-05, |
| "loss": 2.4425, |
| "loss/crossentropy": 2.7915310859680176, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.20626427978277206, |
| "loss/reg": 2.2362594604492188, |
| "step": 162 |
| }, |
| { |
| "epoch": 0.163, |
| "grad_norm": 0.6244944930076599, |
| "grad_norm_var": 0.0027704777096081066, |
| "learning_rate": 5e-05, |
| "loss": 2.4582, |
| "loss/crossentropy": 2.8933472633361816, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24601207673549652, |
| "loss/reg": 2.212204933166504, |
| "step": 163 |
| }, |
| { |
| "epoch": 0.164, |
| "grad_norm": 0.7053691148757935, |
| "grad_norm_var": 0.0033062377336333143, |
| "learning_rate": 5e-05, |
| "loss": 2.4359, |
| "loss/crossentropy": 2.8977383375167847, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24797701835632324, |
| "loss/reg": 2.1879541873931885, |
| "step": 164 |
| }, |
| { |
| "epoch": 0.165, |
| "grad_norm": 0.6405443549156189, |
| "grad_norm_var": 0.0032987997895917924, |
| "learning_rate": 5e-05, |
| "loss": 2.3942, |
| "loss/crossentropy": 2.825384020805359, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.23023562878370285, |
| "loss/reg": 2.163928747177124, |
| "step": 165 |
| }, |
| { |
| "epoch": 0.166, |
| "grad_norm": 0.6541801691055298, |
| "grad_norm_var": 0.0032827854741000213, |
| "learning_rate": 5e-05, |
| "loss": 2.3741, |
| "loss/crossentropy": 2.8386305570602417, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2345515489578247, |
| "loss/reg": 2.139528512954712, |
| "step": 166 |
| }, |
| { |
| "epoch": 0.167, |
| "grad_norm": 0.6583338975906372, |
| "grad_norm_var": 0.0027490642355084915, |
| "learning_rate": 5e-05, |
| "loss": 2.3636, |
| "loss/crossentropy": 2.785430431365967, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.247977614402771, |
| "loss/reg": 2.1156005859375, |
| "step": 167 |
| }, |
| { |
| "epoch": 0.168, |
| "grad_norm": 0.5703513622283936, |
| "grad_norm_var": 0.0029745445667010983, |
| "learning_rate": 5e-05, |
| "loss": 2.3032, |
| "loss/crossentropy": 2.603759288787842, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2102563977241516, |
| "loss/reg": 2.092987298965454, |
| "step": 168 |
| }, |
| { |
| "epoch": 0.169, |
| "grad_norm": 0.6985337138175964, |
| "grad_norm_var": 0.003257480486076503, |
| "learning_rate": 5e-05, |
| "loss": 2.3175, |
| "loss/crossentropy": 2.891556143760681, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24685098230838776, |
| "loss/reg": 2.070605516433716, |
| "step": 169 |
| }, |
| { |
| "epoch": 0.17, |
| "grad_norm": 0.6564696431159973, |
| "grad_norm_var": 0.0032921040179458407, |
| "learning_rate": 5e-05, |
| "loss": 2.2837, |
| "loss/crossentropy": 2.9180163145065308, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.23512013256549835, |
| "loss/reg": 2.0485329627990723, |
| "step": 170 |
| }, |
| { |
| "epoch": 0.171, |
| "grad_norm": 0.6509192585945129, |
| "grad_norm_var": 0.0029248539321223028, |
| "learning_rate": 5e-05, |
| "loss": 2.258, |
| "loss/crossentropy": 2.7910646200180054, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.23172831535339355, |
| "loss/reg": 2.026299238204956, |
| "step": 171 |
| }, |
| { |
| "epoch": 0.172, |
| "grad_norm": 0.6449978351593018, |
| "grad_norm_var": 0.0027606684899635398, |
| "learning_rate": 5e-05, |
| "loss": 2.2302, |
| "loss/crossentropy": 3.004155158996582, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2251865491271019, |
| "loss/reg": 2.005056858062744, |
| "step": 172 |
| }, |
| { |
| "epoch": 0.173, |
| "grad_norm": 0.6231856942176819, |
| "grad_norm_var": 0.0018607324261684136, |
| "learning_rate": 5e-05, |
| "loss": 2.2045, |
| "loss/crossentropy": 3.0147972106933594, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.22081061452627182, |
| "loss/reg": 1.9836708307266235, |
| "step": 173 |
| }, |
| { |
| "epoch": 0.174, |
| "grad_norm": 0.6359754204750061, |
| "grad_norm_var": 0.0017047630970570863, |
| "learning_rate": 5e-05, |
| "loss": 2.203, |
| "loss/crossentropy": 2.7926268577575684, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24143800139427185, |
| "loss/reg": 1.9615272283554077, |
| "step": 174 |
| }, |
| { |
| "epoch": 0.175, |
| "grad_norm": 0.6079391241073608, |
| "grad_norm_var": 0.001550529933650028, |
| "learning_rate": 5e-05, |
| "loss": 2.1815, |
| "loss/crossentropy": 2.961400032043457, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2405368685722351, |
| "loss/reg": 1.9409407377243042, |
| "step": 175 |
| }, |
| { |
| "epoch": 0.176, |
| "grad_norm": 0.6828028559684753, |
| "grad_norm_var": 0.0016850473641842592, |
| "learning_rate": 5e-05, |
| "loss": 2.166, |
| "loss/crossentropy": 2.83432674407959, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24647437781095505, |
| "loss/reg": 1.9194848537445068, |
| "step": 176 |
| }, |
| { |
| "epoch": 0.177, |
| "grad_norm": 0.583172082901001, |
| "grad_norm_var": 0.0017446548013318945, |
| "learning_rate": 5e-05, |
| "loss": 2.1225, |
| "loss/crossentropy": 2.6807559728622437, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.22420911490917206, |
| "loss/reg": 1.898259162902832, |
| "step": 177 |
| }, |
| { |
| "epoch": 0.178, |
| "grad_norm": 0.6200628280639648, |
| "grad_norm_var": 0.0013673776092881932, |
| "learning_rate": 5e-05, |
| "loss": 2.1376, |
| "loss/crossentropy": 2.9716767072677612, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2610393464565277, |
| "loss/reg": 1.8765897750854492, |
| "step": 178 |
| }, |
| { |
| "epoch": 0.179, |
| "grad_norm": 0.6542585492134094, |
| "grad_norm_var": 0.0013569131776667, |
| "learning_rate": 5e-05, |
| "loss": 2.1007, |
| "loss/crossentropy": 3.0051069259643555, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24396414309740067, |
| "loss/reg": 1.8567471504211426, |
| "step": 179 |
| }, |
| { |
| "epoch": 0.18, |
| "grad_norm": 0.9708628058433533, |
| "grad_norm_var": 0.007972158889666152, |
| "learning_rate": 5e-05, |
| "loss": 2.0914, |
| "loss/crossentropy": 2.86428964138031, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.25481095910072327, |
| "loss/reg": 1.836567997932434, |
| "step": 180 |
| }, |
| { |
| "epoch": 0.181, |
| "grad_norm": 0.6248161792755127, |
| "grad_norm_var": 0.008027448831500313, |
| "learning_rate": 5e-05, |
| "loss": 2.0598, |
| "loss/crossentropy": 2.821524500846863, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24282748252153397, |
| "loss/reg": 1.8169887065887451, |
| "step": 181 |
| }, |
| { |
| "epoch": 0.182, |
| "grad_norm": 0.7002890110015869, |
| "grad_norm_var": 0.00813343676731737, |
| "learning_rate": 5e-05, |
| "loss": 2.054, |
| "loss/crossentropy": 3.0109115839004517, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2566458582878113, |
| "loss/reg": 1.7973047494888306, |
| "step": 182 |
| }, |
| { |
| "epoch": 0.183, |
| "grad_norm": 0.6039508581161499, |
| "grad_norm_var": 0.008340772420818076, |
| "learning_rate": 5e-05, |
| "loss": 1.9981, |
| "loss/crossentropy": 2.843403697013855, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.21949958056211472, |
| "loss/reg": 1.7785518169403076, |
| "step": 183 |
| }, |
| { |
| "epoch": 0.184, |
| "grad_norm": 0.7374703288078308, |
| "grad_norm_var": 0.008132468130944526, |
| "learning_rate": 5e-05, |
| "loss": 2.0347, |
| "loss/crossentropy": 2.9846529960632324, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2751229256391525, |
| "loss/reg": 1.759621262550354, |
| "step": 184 |
| }, |
| { |
| "epoch": 0.185, |
| "grad_norm": 0.7407576441764832, |
| "grad_norm_var": 0.008413085807039151, |
| "learning_rate": 5e-05, |
| "loss": 2.0126, |
| "loss/crossentropy": 2.625816583633423, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.27212583273649216, |
| "loss/reg": 1.7404433488845825, |
| "step": 185 |
| }, |
| { |
| "epoch": 0.186, |
| "grad_norm": 0.6439176201820374, |
| "grad_norm_var": 0.008447452827555664, |
| "learning_rate": 5e-05, |
| "loss": 1.9754, |
| "loss/crossentropy": 2.86109459400177, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.25359396636486053, |
| "loss/reg": 1.7217762470245361, |
| "step": 186 |
| }, |
| { |
| "epoch": 0.187, |
| "grad_norm": 0.6065324544906616, |
| "grad_norm_var": 0.008685503322823192, |
| "learning_rate": 5e-05, |
| "loss": 1.9392, |
| "loss/crossentropy": 2.9565869569778442, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2354530543088913, |
| "loss/reg": 1.7037036418914795, |
| "step": 187 |
| }, |
| { |
| "epoch": 0.188, |
| "grad_norm": 0.7049283385276794, |
| "grad_norm_var": 0.008729678519272104, |
| "learning_rate": 5e-05, |
| "loss": 1.9657, |
| "loss/crossentropy": 3.045696973800659, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2810002863407135, |
| "loss/reg": 1.684686303138733, |
| "step": 188 |
| }, |
| { |
| "epoch": 0.189, |
| "grad_norm": 0.6447752118110657, |
| "grad_norm_var": 0.008620286357623982, |
| "learning_rate": 5e-05, |
| "loss": 1.9143, |
| "loss/crossentropy": 2.979143977165222, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2483573853969574, |
| "loss/reg": 1.6659739017486572, |
| "step": 189 |
| }, |
| { |
| "epoch": 0.19, |
| "grad_norm": 0.7084380984306335, |
| "grad_norm_var": 0.008594058188990875, |
| "learning_rate": 5e-05, |
| "loss": 1.9094, |
| "loss/crossentropy": 3.0037381649017334, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.26224710047245026, |
| "loss/reg": 1.647152066230774, |
| "step": 190 |
| }, |
| { |
| "epoch": 0.191, |
| "grad_norm": 0.6292643547058105, |
| "grad_norm_var": 0.008425587307729273, |
| "learning_rate": 5e-05, |
| "loss": 1.8728, |
| "loss/crossentropy": 2.776616096496582, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24308519065380096, |
| "loss/reg": 1.629696011543274, |
| "step": 191 |
| }, |
| { |
| "epoch": 0.192, |
| "grad_norm": 0.6195903420448303, |
| "grad_norm_var": 0.008639217886339508, |
| "learning_rate": 5e-05, |
| "loss": 1.8582, |
| "loss/crossentropy": 2.8793495893478394, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24671810865402222, |
| "loss/reg": 1.6115295886993408, |
| "step": 192 |
| }, |
| { |
| "epoch": 0.193, |
| "grad_norm": 0.6114274263381958, |
| "grad_norm_var": 0.008344792897513272, |
| "learning_rate": 5e-05, |
| "loss": 1.824, |
| "loss/crossentropy": 2.8879438638687134, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.23117925971746445, |
| "loss/reg": 1.5927931070327759, |
| "step": 193 |
| }, |
| { |
| "epoch": 0.194, |
| "grad_norm": 0.706379771232605, |
| "grad_norm_var": 0.008162836976792187, |
| "learning_rate": 5e-05, |
| "loss": 1.8412, |
| "loss/crossentropy": 3.093979835510254, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2654081881046295, |
| "loss/reg": 1.5757418870925903, |
| "step": 194 |
| }, |
| { |
| "epoch": 0.195, |
| "grad_norm": 0.6785649061203003, |
| "grad_norm_var": 0.008110735383201017, |
| "learning_rate": 5e-05, |
| "loss": 1.8261, |
| "loss/crossentropy": 2.91018807888031, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2684011161327362, |
| "loss/reg": 1.5576666593551636, |
| "step": 195 |
| }, |
| { |
| "epoch": 0.196, |
| "grad_norm": 0.9223871827125549, |
| "grad_norm_var": 0.006398627932527212, |
| "learning_rate": 5e-05, |
| "loss": 1.8216, |
| "loss/crossentropy": 2.6345103979110718, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.28215403854846954, |
| "loss/reg": 1.5394768714904785, |
| "step": 196 |
| }, |
| { |
| "epoch": 0.197, |
| "grad_norm": 0.6953710913658142, |
| "grad_norm_var": 0.0061885688973583415, |
| "learning_rate": 5e-05, |
| "loss": 1.7887, |
| "loss/crossentropy": 2.8300334215164185, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.26530228555202484, |
| "loss/reg": 1.5233852863311768, |
| "step": 197 |
| }, |
| { |
| "epoch": 0.198, |
| "grad_norm": 0.7092037200927734, |
| "grad_norm_var": 0.006212151263638773, |
| "learning_rate": 5e-05, |
| "loss": 1.796, |
| "loss/crossentropy": 2.900989532470703, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.289993479847908, |
| "loss/reg": 1.5060181617736816, |
| "step": 198 |
| }, |
| { |
| "epoch": 0.199, |
| "grad_norm": 0.687549889087677, |
| "grad_norm_var": 0.0057434721849939285, |
| "learning_rate": 5e-05, |
| "loss": 1.7801, |
| "loss/crossentropy": 2.615636467933655, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2899749279022217, |
| "loss/reg": 1.4900915622711182, |
| "step": 199 |
| }, |
| { |
| "epoch": 0.2, |
| "grad_norm": 0.6979121565818787, |
| "grad_norm_var": 0.00559305863393457, |
| "learning_rate": 5e-05, |
| "loss": 1.7404, |
| "loss/crossentropy": 2.9122893810272217, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.26565686613321304, |
| "loss/reg": 1.4747281074523926, |
| "step": 200 |
| }, |
| { |
| "epoch": 0.201, |
| "grad_norm": 0.6851444840431213, |
| "grad_norm_var": 0.005394694185855483, |
| "learning_rate": 5e-05, |
| "loss": 1.7416, |
| "loss/crossentropy": 2.9977253675460815, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2822398692369461, |
| "loss/reg": 1.4594069719314575, |
| "step": 201 |
| }, |
| { |
| "epoch": 0.202, |
| "grad_norm": 0.7002301216125488, |
| "grad_norm_var": 0.0052884693971790705, |
| "learning_rate": 5e-05, |
| "loss": 1.7267, |
| "loss/crossentropy": 2.8535778522491455, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2828134596347809, |
| "loss/reg": 1.443840503692627, |
| "step": 202 |
| }, |
| { |
| "epoch": 0.203, |
| "grad_norm": 0.7128337025642395, |
| "grad_norm_var": 0.004840302523163113, |
| "learning_rate": 5e-05, |
| "loss": 1.7112, |
| "loss/crossentropy": 2.9661608934402466, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2828420251607895, |
| "loss/reg": 1.4283322095870972, |
| "step": 203 |
| }, |
| { |
| "epoch": 0.204, |
| "grad_norm": 0.6166555285453796, |
| "grad_norm_var": 0.005206041385155657, |
| "learning_rate": 5e-05, |
| "loss": 1.6562, |
| "loss/crossentropy": 2.8527190685272217, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24381333589553833, |
| "loss/reg": 1.4123433828353882, |
| "step": 204 |
| }, |
| { |
| "epoch": 0.205, |
| "grad_norm": 0.7114465832710266, |
| "grad_norm_var": 0.0050897613394295625, |
| "learning_rate": 5e-05, |
| "loss": 1.6716, |
| "loss/crossentropy": 3.0159157514572144, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2747664153575897, |
| "loss/reg": 1.3968148231506348, |
| "step": 205 |
| }, |
| { |
| "epoch": 0.206, |
| "grad_norm": 0.7074129581451416, |
| "grad_norm_var": 0.005087754442248723, |
| "learning_rate": 5e-05, |
| "loss": 1.6706, |
| "loss/crossentropy": 2.896737217903137, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.28868268430233, |
| "loss/reg": 1.3819215297698975, |
| "step": 206 |
| }, |
| { |
| "epoch": 0.207, |
| "grad_norm": 0.7572155594825745, |
| "grad_norm_var": 0.005020035726158708, |
| "learning_rate": 5e-05, |
| "loss": 1.6751, |
| "loss/crossentropy": 2.909687876701355, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.30872002243995667, |
| "loss/reg": 1.366390585899353, |
| "step": 207 |
| }, |
| { |
| "epoch": 0.208, |
| "grad_norm": 0.625473141670227, |
| "grad_norm_var": 0.0049581801003419764, |
| "learning_rate": 5e-05, |
| "loss": 1.6005, |
| "loss/crossentropy": 2.992057204246521, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24918101727962494, |
| "loss/reg": 1.3513410091400146, |
| "step": 208 |
| }, |
| { |
| "epoch": 0.209, |
| "grad_norm": 0.753361701965332, |
| "grad_norm_var": 0.004511249961185711, |
| "learning_rate": 5e-05, |
| "loss": 1.6357, |
| "loss/crossentropy": 2.906985878944397, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2985745370388031, |
| "loss/reg": 1.337154746055603, |
| "step": 209 |
| }, |
| { |
| "epoch": 0.21, |
| "grad_norm": 0.6651209592819214, |
| "grad_norm_var": 0.004640014328985472, |
| "learning_rate": 5e-05, |
| "loss": 1.5783, |
| "loss/crossentropy": 2.746882438659668, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.255014605820179, |
| "loss/reg": 1.3232678174972534, |
| "step": 210 |
| }, |
| { |
| "epoch": 0.211, |
| "grad_norm": 0.7629631161689758, |
| "grad_norm_var": 0.004755457989288677, |
| "learning_rate": 5e-05, |
| "loss": 1.6132, |
| "loss/crossentropy": 2.9214253425598145, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.30417484045028687, |
| "loss/reg": 1.3090054988861084, |
| "step": 211 |
| }, |
| { |
| "epoch": 0.212, |
| "grad_norm": 0.9364895820617676, |
| "grad_norm_var": 0.005161334564255648, |
| "learning_rate": 5e-05, |
| "loss": 1.5439, |
| "loss/crossentropy": 2.6135218143463135, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.24877788126468658, |
| "loss/reg": 1.295088291168213, |
| "step": 212 |
| }, |
| { |
| "epoch": 0.213, |
| "grad_norm": 0.7151551842689514, |
| "grad_norm_var": 0.005136593544237049, |
| "learning_rate": 5e-05, |
| "loss": 1.5671, |
| "loss/crossentropy": 2.820131778717041, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.28553374111652374, |
| "loss/reg": 1.2815275192260742, |
| "step": 213 |
| }, |
| { |
| "epoch": 0.214, |
| "grad_norm": 0.6173150539398193, |
| "grad_norm_var": 0.0057385208676978385, |
| "learning_rate": 5e-05, |
| "loss": 1.5244, |
| "loss/crossentropy": 2.868087410926819, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.25565874576568604, |
| "loss/reg": 1.2687426805496216, |
| "step": 214 |
| }, |
| { |
| "epoch": 0.215, |
| "grad_norm": 0.6562452912330627, |
| "grad_norm_var": 0.0058914610408413726, |
| "learning_rate": 5e-05, |
| "loss": 1.5326, |
| "loss/crossentropy": 2.909465789794922, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2767190784215927, |
| "loss/reg": 1.2559149265289307, |
| "step": 215 |
| }, |
| { |
| "epoch": 0.216, |
| "grad_norm": 0.7009130120277405, |
| "grad_norm_var": 0.005888163245655642, |
| "learning_rate": 5e-05, |
| "loss": 1.5031, |
| "loss/crossentropy": 2.9817615747451782, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.259988933801651, |
| "loss/reg": 1.243100643157959, |
| "step": 216 |
| }, |
| { |
| "epoch": 0.217, |
| "grad_norm": 0.7496820688247681, |
| "grad_norm_var": 0.005953974184021149, |
| "learning_rate": 5e-05, |
| "loss": 1.5033, |
| "loss/crossentropy": 2.944424033164978, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.27260367572307587, |
| "loss/reg": 1.2307254076004028, |
| "step": 217 |
| }, |
| { |
| "epoch": 0.218, |
| "grad_norm": 0.8249980807304382, |
| "grad_norm_var": 0.006734738877791679, |
| "learning_rate": 5e-05, |
| "loss": 1.5135, |
| "loss/crossentropy": 3.057396650314331, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.29501117765903473, |
| "loss/reg": 1.218528389930725, |
| "step": 218 |
| }, |
| { |
| "epoch": 0.219, |
| "grad_norm": 0.8628643155097961, |
| "grad_norm_var": 0.008006607538223997, |
| "learning_rate": 5e-05, |
| "loss": 1.5072, |
| "loss/crossentropy": 2.8148328065872192, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3009510338306427, |
| "loss/reg": 1.2062116861343384, |
| "step": 219 |
| }, |
| { |
| "epoch": 0.22, |
| "grad_norm": 0.7126441597938538, |
| "grad_norm_var": 0.007145182407312693, |
| "learning_rate": 5e-05, |
| "loss": 1.4742, |
| "loss/crossentropy": 2.863771080970764, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.27980826795101166, |
| "loss/reg": 1.194394826889038, |
| "step": 220 |
| }, |
| { |
| "epoch": 0.221, |
| "grad_norm": 0.7264153361320496, |
| "grad_norm_var": 0.007112264898622778, |
| "learning_rate": 5e-05, |
| "loss": 1.4735, |
| "loss/crossentropy": 2.884490489959717, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.29168978333473206, |
| "loss/reg": 1.1818419694900513, |
| "step": 221 |
| }, |
| { |
| "epoch": 0.222, |
| "grad_norm": 0.6884644627571106, |
| "grad_norm_var": 0.007206656181989975, |
| "learning_rate": 5e-05, |
| "loss": 1.438, |
| "loss/crossentropy": 2.7145625352859497, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2684813439846039, |
| "loss/reg": 1.169567584991455, |
| "step": 222 |
| }, |
| { |
| "epoch": 0.223, |
| "grad_norm": 0.9131287932395935, |
| "grad_norm_var": 0.009193870623184294, |
| "learning_rate": 5e-05, |
| "loss": 1.4565, |
| "loss/crossentropy": 2.720183491706848, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2985244393348694, |
| "loss/reg": 1.1579620838165283, |
| "step": 223 |
| }, |
| { |
| "epoch": 0.224, |
| "grad_norm": 1.1143068075180054, |
| "grad_norm_var": 0.016373975609130238, |
| "learning_rate": 5e-05, |
| "loss": 1.4432, |
| "loss/crossentropy": 2.6667935848236084, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.29670025408267975, |
| "loss/reg": 1.1464924812316895, |
| "step": 224 |
| }, |
| { |
| "epoch": 0.225, |
| "grad_norm": 0.7764210104942322, |
| "grad_norm_var": 0.016340667229655267, |
| "learning_rate": 5e-05, |
| "loss": 1.4081, |
| "loss/crossentropy": 2.9366711378097534, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2730083018541336, |
| "loss/reg": 1.1351009607315063, |
| "step": 225 |
| }, |
| { |
| "epoch": 0.226, |
| "grad_norm": 0.9689040184020996, |
| "grad_norm_var": 0.017599293752276764, |
| "learning_rate": 5e-05, |
| "loss": 1.3899, |
| "loss/crossentropy": 2.7855184078216553, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.26647692918777466, |
| "loss/reg": 1.1233820915222168, |
| "step": 226 |
| }, |
| { |
| "epoch": 0.227, |
| "grad_norm": 0.8031681180000305, |
| "grad_norm_var": 0.01752626708873953, |
| "learning_rate": 5e-05, |
| "loss": 1.4003, |
| "loss/crossentropy": 2.986753463745117, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2886398732662201, |
| "loss/reg": 1.1116759777069092, |
| "step": 227 |
| }, |
| { |
| "epoch": 0.228, |
| "grad_norm": 0.8463804125785828, |
| "grad_norm_var": 0.01636919082708766, |
| "learning_rate": 5e-05, |
| "loss": 1.3672, |
| "loss/crossentropy": 2.873613715171814, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.26657989621162415, |
| "loss/reg": 1.100651502609253, |
| "step": 228 |
| }, |
| { |
| "epoch": 0.229, |
| "grad_norm": 0.6948341131210327, |
| "grad_norm_var": 0.01660405689217973, |
| "learning_rate": 5e-05, |
| "loss": 1.3706, |
| "loss/crossentropy": 2.7391839027404785, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.28144964575767517, |
| "loss/reg": 1.0892000198364258, |
| "step": 229 |
| }, |
| { |
| "epoch": 0.23, |
| "grad_norm": 0.9509689807891846, |
| "grad_norm_var": 0.015833205826391236, |
| "learning_rate": 5e-05, |
| "loss": 1.3857, |
| "loss/crossentropy": 2.9197545051574707, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.30744266510009766, |
| "loss/reg": 1.0782514810562134, |
| "step": 230 |
| }, |
| { |
| "epoch": 0.231, |
| "grad_norm": 0.748853862285614, |
| "grad_norm_var": 0.014447279943951334, |
| "learning_rate": 5e-05, |
| "loss": 1.3604, |
| "loss/crossentropy": 2.839870572090149, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.29206788539886475, |
| "loss/reg": 1.0683743953704834, |
| "step": 231 |
| }, |
| { |
| "epoch": 0.232, |
| "grad_norm": 0.8095267415046692, |
| "grad_norm_var": 0.013493527951780953, |
| "learning_rate": 5e-05, |
| "loss": 1.3657, |
| "loss/crossentropy": 2.9903966188430786, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3085637092590332, |
| "loss/reg": 1.0571743249893188, |
| "step": 232 |
| }, |
| { |
| "epoch": 0.233, |
| "grad_norm": 0.8107147216796875, |
| "grad_norm_var": 0.013117717721529453, |
| "learning_rate": 5e-05, |
| "loss": 1.3398, |
| "loss/crossentropy": 2.8481212854385376, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.29325956106185913, |
| "loss/reg": 1.0465056896209717, |
| "step": 233 |
| }, |
| { |
| "epoch": 0.234, |
| "grad_norm": 0.7831809520721436, |
| "grad_norm_var": 0.013245348165944412, |
| "learning_rate": 5e-05, |
| "loss": 1.3358, |
| "loss/crossentropy": 2.9512486457824707, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2999182194471359, |
| "loss/reg": 1.0358693599700928, |
| "step": 234 |
| }, |
| { |
| "epoch": 0.235, |
| "grad_norm": 0.8809396028518677, |
| "grad_norm_var": 0.013355399085581714, |
| "learning_rate": 5e-05, |
| "loss": 1.3448, |
| "loss/crossentropy": 3.005630373954773, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3190816640853882, |
| "loss/reg": 1.0257478952407837, |
| "step": 235 |
| }, |
| { |
| "epoch": 0.236, |
| "grad_norm": 0.9857144355773926, |
| "grad_norm_var": 0.01385939927712793, |
| "learning_rate": 5e-05, |
| "loss": 1.312, |
| "loss/crossentropy": 3.0213024616241455, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2961515337228775, |
| "loss/reg": 1.0158976316452026, |
| "step": 236 |
| }, |
| { |
| "epoch": 0.237, |
| "grad_norm": 0.7648155689239502, |
| "grad_norm_var": 0.013350188111776958, |
| "learning_rate": 5e-05, |
| "loss": 1.2799, |
| "loss/crossentropy": 2.895354390144348, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.27419954538345337, |
| "loss/reg": 1.0056633949279785, |
| "step": 237 |
| }, |
| { |
| "epoch": 0.238, |
| "grad_norm": 0.7705875635147095, |
| "grad_norm_var": 0.01204376838974491, |
| "learning_rate": 5e-05, |
| "loss": 1.291, |
| "loss/crossentropy": 2.947967290878296, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2946975827217102, |
| "loss/reg": 0.9963375329971313, |
| "step": 238 |
| }, |
| { |
| "epoch": 0.239, |
| "grad_norm": 0.7487175464630127, |
| "grad_norm_var": 0.012380084421218606, |
| "learning_rate": 5e-05, |
| "loss": 1.3084, |
| "loss/crossentropy": 3.0445579290390015, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.32178574800491333, |
| "loss/reg": 0.9866136908531189, |
| "step": 239 |
| }, |
| { |
| "epoch": 0.24, |
| "grad_norm": 0.7019728422164917, |
| "grad_norm_var": 0.007987457273487058, |
| "learning_rate": 5e-05, |
| "loss": 1.236, |
| "loss/crossentropy": 2.880319118499756, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2595532685518265, |
| "loss/reg": 0.9764720797538757, |
| "step": 240 |
| }, |
| { |
| "epoch": 0.241, |
| "grad_norm": 1.1456328630447388, |
| "grad_norm_var": 0.014590579180741218, |
| "learning_rate": 5e-05, |
| "loss": 1.2864, |
| "loss/crossentropy": 2.9781651496887207, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3202568143606186, |
| "loss/reg": 0.9661031365394592, |
| "step": 241 |
| }, |
| { |
| "epoch": 0.242, |
| "grad_norm": 0.8946651816368103, |
| "grad_norm_var": 0.013643563414677103, |
| "learning_rate": 5e-05, |
| "loss": 1.2993, |
| "loss/crossentropy": 3.0756888389587402, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.34303000569343567, |
| "loss/reg": 0.956248939037323, |
| "step": 242 |
| }, |
| { |
| "epoch": 0.243, |
| "grad_norm": 0.8200750946998596, |
| "grad_norm_var": 0.013592394267520886, |
| "learning_rate": 5e-05, |
| "loss": 1.2815, |
| "loss/crossentropy": 2.9398674964904785, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.33493560552597046, |
| "loss/reg": 0.946540892124176, |
| "step": 243 |
| }, |
| { |
| "epoch": 0.244, |
| "grad_norm": 0.7729702591896057, |
| "grad_norm_var": 0.013816338077596043, |
| "learning_rate": 5e-05, |
| "loss": 1.243, |
| "loss/crossentropy": 3.0726516246795654, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.30656400322914124, |
| "loss/reg": 0.9364368319511414, |
| "step": 244 |
| }, |
| { |
| "epoch": 0.245, |
| "grad_norm": 0.816540539264679, |
| "grad_norm_var": 0.012544479226511326, |
| "learning_rate": 5e-05, |
| "loss": 1.2548, |
| "loss/crossentropy": 2.968939185142517, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.32855695486068726, |
| "loss/reg": 0.9261980056762695, |
| "step": 245 |
| }, |
| { |
| "epoch": 0.246, |
| "grad_norm": 0.6727208495140076, |
| "grad_norm_var": 0.013187311357218201, |
| "learning_rate": 5e-05, |
| "loss": 1.2027, |
| "loss/crossentropy": 2.84572696685791, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2862945795059204, |
| "loss/reg": 0.9164278507232666, |
| "step": 246 |
| }, |
| { |
| "epoch": 0.247, |
| "grad_norm": 1.016327142715454, |
| "grad_norm_var": 0.015104387701928523, |
| "learning_rate": 5e-05, |
| "loss": 1.2505, |
| "loss/crossentropy": 3.1467254161834717, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.34335389733314514, |
| "loss/reg": 0.9071752429008484, |
| "step": 247 |
| }, |
| { |
| "epoch": 0.248, |
| "grad_norm": 0.7945390939712524, |
| "grad_norm_var": 0.01517371573805364, |
| "learning_rate": 5e-05, |
| "loss": 1.2133, |
| "loss/crossentropy": 3.0013450384140015, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.31558404862880707, |
| "loss/reg": 0.8976951241493225, |
| "step": 248 |
| }, |
| { |
| "epoch": 0.249, |
| "grad_norm": 0.7104422450065613, |
| "grad_norm_var": 0.016143619890534387, |
| "learning_rate": 5e-05, |
| "loss": 1.1662, |
| "loss/crossentropy": 2.7924689054489136, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.27776099741458893, |
| "loss/reg": 0.8883988261222839, |
| "step": 249 |
| }, |
| { |
| "epoch": 0.25, |
| "grad_norm": 0.8339412212371826, |
| "grad_norm_var": 0.01598785162887655, |
| "learning_rate": 5e-05, |
| "loss": 1.1955, |
| "loss/crossentropy": 2.3090237379074097, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.315933033823967, |
| "loss/reg": 0.879578709602356, |
| "step": 250 |
| }, |
| { |
| "epoch": 0.251, |
| "grad_norm": 0.8716294765472412, |
| "grad_norm_var": 0.015933961076213164, |
| "learning_rate": 5e-05, |
| "loss": 1.1673, |
| "loss/crossentropy": 3.037050485610962, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2962581366300583, |
| "loss/reg": 0.8710877895355225, |
| "step": 251 |
| }, |
| { |
| "epoch": 0.252, |
| "grad_norm": 0.8150233626365662, |
| "grad_norm_var": 0.014269785703674135, |
| "learning_rate": 5e-05, |
| "loss": 1.1592, |
| "loss/crossentropy": 3.1129109859466553, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2970938980579376, |
| "loss/reg": 0.8621120452880859, |
| "step": 252 |
| }, |
| { |
| "epoch": 0.253, |
| "grad_norm": 0.7315642237663269, |
| "grad_norm_var": 0.014592029154893696, |
| "learning_rate": 5e-05, |
| "loss": 1.1471, |
| "loss/crossentropy": 2.889196276664734, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.2937185764312744, |
| "loss/reg": 0.8534132838249207, |
| "step": 253 |
| }, |
| { |
| "epoch": 0.254, |
| "grad_norm": 0.7803939580917358, |
| "grad_norm_var": 0.014533648375878212, |
| "learning_rate": 5e-05, |
| "loss": 1.1458, |
| "loss/crossentropy": 3.075181007385254, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3009389489889145, |
| "loss/reg": 0.8448362350463867, |
| "step": 254 |
| }, |
| { |
| "epoch": 0.255, |
| "grad_norm": 0.7920603156089783, |
| "grad_norm_var": 0.01423653210848254, |
| "learning_rate": 5e-05, |
| "loss": 1.1582, |
| "loss/crossentropy": 2.8265258073806763, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3221704363822937, |
| "loss/reg": 0.8360778093338013, |
| "step": 255 |
| }, |
| { |
| "epoch": 0.256, |
| "grad_norm": 0.788984477519989, |
| "grad_norm_var": 0.013303806584813968, |
| "learning_rate": 5e-05, |
| "loss": 1.1298, |
| "loss/crossentropy": 2.901215076446533, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3016386032104492, |
| "loss/reg": 0.828145444393158, |
| "step": 256 |
| }, |
| { |
| "epoch": 0.257, |
| "grad_norm": 0.7980138063430786, |
| "grad_norm_var": 0.006161762816175494, |
| "learning_rate": 5e-05, |
| "loss": 1.1397, |
| "loss/crossentropy": 2.903734564781189, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.31865233182907104, |
| "loss/reg": 0.8210075497627258, |
| "step": 257 |
| }, |
| { |
| "epoch": 0.258, |
| "grad_norm": 0.7274736166000366, |
| "grad_norm_var": 0.0059516379610364675, |
| "learning_rate": 5e-05, |
| "loss": 1.1324, |
| "loss/crossentropy": 3.0283299684524536, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3197466880083084, |
| "loss/reg": 0.8126904964447021, |
| "step": 258 |
| }, |
| { |
| "epoch": 0.259, |
| "grad_norm": 0.7772271037101746, |
| "grad_norm_var": 0.005931234497544683, |
| "learning_rate": 5e-05, |
| "loss": 1.1272, |
| "loss/crossentropy": 2.850598692893982, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3224297910928726, |
| "loss/reg": 0.8047308921813965, |
| "step": 259 |
| }, |
| { |
| "epoch": 0.26, |
| "grad_norm": 0.7773681282997131, |
| "grad_norm_var": 0.005920263883812149, |
| "learning_rate": 5e-05, |
| "loss": 1.1049, |
| "loss/crossentropy": 2.613164782524109, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3078983873128891, |
| "loss/reg": 0.7969905734062195, |
| "step": 260 |
| }, |
| { |
| "epoch": 0.261, |
| "grad_norm": 0.8489705920219421, |
| "grad_norm_var": 0.006083393660339676, |
| "learning_rate": 5e-05, |
| "loss": 1.1097, |
| "loss/crossentropy": 2.8783843517303467, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.32094669342041016, |
| "loss/reg": 0.7887587547302246, |
| "step": 261 |
| }, |
| { |
| "epoch": 0.262, |
| "grad_norm": 0.8952324390411377, |
| "grad_norm_var": 0.005519124480930238, |
| "learning_rate": 5e-05, |
| "loss": 1.0874, |
| "loss/crossentropy": 2.8729227781295776, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.30694882571697235, |
| "loss/reg": 0.7804424166679382, |
| "step": 262 |
| }, |
| { |
| "epoch": 0.263, |
| "grad_norm": 1.0492231845855713, |
| "grad_norm_var": 0.0064919600576624605, |
| "learning_rate": 5e-05, |
| "loss": 1.1143, |
| "loss/crossentropy": 3.4083826541900635, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3417075276374817, |
| "loss/reg": 0.7725980877876282, |
| "step": 263 |
| }, |
| { |
| "epoch": 0.264, |
| "grad_norm": 0.8685043454170227, |
| "grad_norm_var": 0.0066616348925386, |
| "learning_rate": 5e-05, |
| "loss": 1.1049, |
| "loss/crossentropy": 3.001236915588379, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3399762958288193, |
| "loss/reg": 0.7648863196372986, |
| "step": 264 |
| }, |
| { |
| "epoch": 0.265, |
| "grad_norm": 0.8184032440185547, |
| "grad_norm_var": 0.0058615817369480185, |
| "learning_rate": 5e-05, |
| "loss": 1.0755, |
| "loss/crossentropy": 2.906570553779602, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.31862136721611023, |
| "loss/reg": 0.7568546533584595, |
| "step": 265 |
| }, |
| { |
| "epoch": 0.266, |
| "grad_norm": 0.7505112886428833, |
| "grad_norm_var": 0.006179087172862552, |
| "learning_rate": 5e-05, |
| "loss": 1.06, |
| "loss/crossentropy": 2.804160475730896, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.31044724583625793, |
| "loss/reg": 0.749531626701355, |
| "step": 266 |
| }, |
| { |
| "epoch": 0.267, |
| "grad_norm": 0.7922502756118774, |
| "grad_norm_var": 0.006007003612818984, |
| "learning_rate": 5e-05, |
| "loss": 1.051, |
| "loss/crossentropy": 2.9184391498565674, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3090463727712631, |
| "loss/reg": 0.7419831156730652, |
| "step": 267 |
| }, |
| { |
| "epoch": 0.268, |
| "grad_norm": 0.8705897331237793, |
| "grad_norm_var": 0.0062134869577634404, |
| "learning_rate": 5e-05, |
| "loss": 1.0859, |
| "loss/crossentropy": 2.798104166984558, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3518783301115036, |
| "loss/reg": 0.7340484857559204, |
| "step": 268 |
| }, |
| { |
| "epoch": 0.269, |
| "grad_norm": 0.802116870880127, |
| "grad_norm_var": 0.0057239701747472315, |
| "learning_rate": 5e-05, |
| "loss": 1.0206, |
| "loss/crossentropy": 2.8586721420288086, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.29320792853832245, |
| "loss/reg": 0.7273575067520142, |
| "step": 269 |
| }, |
| { |
| "epoch": 0.27, |
| "grad_norm": 0.8284055590629578, |
| "grad_norm_var": 0.005607568831759228, |
| "learning_rate": 5e-05, |
| "loss": 1.056, |
| "loss/crossentropy": 3.032300353050232, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3356429636478424, |
| "loss/reg": 0.7204054594039917, |
| "step": 270 |
| }, |
| { |
| "epoch": 0.271, |
| "grad_norm": 2.0836539268493652, |
| "grad_norm_var": 0.10435616629289696, |
| "learning_rate": 5e-05, |
| "loss": 1.0852, |
| "loss/crossentropy": 3.399788737297058, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.37154923379421234, |
| "loss/reg": 0.7136300802230835, |
| "step": 271 |
| }, |
| { |
| "epoch": 0.272, |
| "grad_norm": 1.0484439134597778, |
| "grad_norm_var": 0.10455674750312541, |
| "learning_rate": 5e-05, |
| "loss": 1.0175, |
| "loss/crossentropy": 2.908281922340393, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3110799193382263, |
| "loss/reg": 0.7064634561538696, |
| "step": 272 |
| }, |
| { |
| "epoch": 0.273, |
| "grad_norm": 1.147485375404358, |
| "grad_norm_var": 0.10645807565400565, |
| "learning_rate": 5e-05, |
| "loss": 1.0645, |
| "loss/crossentropy": 3.035453200340271, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.36423003673553467, |
| "loss/reg": 0.7002757787704468, |
| "step": 273 |
| }, |
| { |
| "epoch": 0.274, |
| "grad_norm": 0.8011353611946106, |
| "grad_norm_var": 0.10468171123688919, |
| "learning_rate": 5e-05, |
| "loss": 1.015, |
| "loss/crossentropy": 2.9256083965301514, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3210597187280655, |
| "loss/reg": 0.6938965320587158, |
| "step": 274 |
| }, |
| { |
| "epoch": 0.275, |
| "grad_norm": 0.8387688994407654, |
| "grad_norm_var": 0.10352148211477896, |
| "learning_rate": 5e-05, |
| "loss": 1.0035, |
| "loss/crossentropy": 2.907983660697937, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.31610578298568726, |
| "loss/reg": 0.687362551689148, |
| "step": 275 |
| }, |
| { |
| "epoch": 0.276, |
| "grad_norm": 0.8177564740180969, |
| "grad_norm_var": 0.10268670196313608, |
| "learning_rate": 5e-05, |
| "loss": 0.9856, |
| "loss/crossentropy": 2.863765835762024, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.30466218292713165, |
| "loss/reg": 0.6809385418891907, |
| "step": 276 |
| }, |
| { |
| "epoch": 0.277, |
| "grad_norm": 0.89921635389328, |
| "grad_norm_var": 0.10214192116831029, |
| "learning_rate": 5e-05, |
| "loss": 0.9987, |
| "loss/crossentropy": 2.8876839876174927, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.32445386052131653, |
| "loss/reg": 0.6742413640022278, |
| "step": 277 |
| }, |
| { |
| "epoch": 0.278, |
| "grad_norm": 1.0634788274765015, |
| "grad_norm_var": 0.10252590105827811, |
| "learning_rate": 5e-05, |
| "loss": 1.0142, |
| "loss/crossentropy": 2.999605178833008, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.34653183817863464, |
| "loss/reg": 0.6676841974258423, |
| "step": 278 |
| }, |
| { |
| "epoch": 0.279, |
| "grad_norm": 0.8068830966949463, |
| "grad_norm_var": 0.10355569161464562, |
| "learning_rate": 5e-05, |
| "loss": 0.974, |
| "loss/crossentropy": 2.9939992427825928, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.31289736926555634, |
| "loss/reg": 0.661117434501648, |
| "step": 279 |
| }, |
| { |
| "epoch": 0.28, |
| "grad_norm": 0.8449327945709229, |
| "grad_norm_var": 0.10385393471796989, |
| "learning_rate": 5e-05, |
| "loss": 0.9846, |
| "loss/crossentropy": 3.0568209886550903, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.32985930144786835, |
| "loss/reg": 0.6547542810440063, |
| "step": 280 |
| }, |
| { |
| "epoch": 0.281, |
| "grad_norm": 0.8780697584152222, |
| "grad_norm_var": 0.10302254089375525, |
| "learning_rate": 5e-05, |
| "loss": 0.9807, |
| "loss/crossentropy": 2.868246555328369, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.33173659443855286, |
| "loss/reg": 0.6489641070365906, |
| "step": 281 |
| }, |
| { |
| "epoch": 0.282, |
| "grad_norm": 0.8639450073242188, |
| "grad_norm_var": 0.10073990944600125, |
| "learning_rate": 5e-05, |
| "loss": 0.9501, |
| "loss/crossentropy": 2.931252360343933, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.30781568586826324, |
| "loss/reg": 0.6423049569129944, |
| "step": 282 |
| }, |
| { |
| "epoch": 0.283, |
| "grad_norm": 0.8756406903266907, |
| "grad_norm_var": 0.09929051474966973, |
| "learning_rate": 5e-05, |
| "loss": 0.9945, |
| "loss/crossentropy": 2.997495651245117, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3583994507789612, |
| "loss/reg": 0.6360748410224915, |
| "step": 283 |
| }, |
| { |
| "epoch": 0.284, |
| "grad_norm": 0.9026868939399719, |
| "grad_norm_var": 0.0989426996958554, |
| "learning_rate": 5e-05, |
| "loss": 0.9652, |
| "loss/crossentropy": 3.1916286945343018, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.33506248891353607, |
| "loss/reg": 0.6301226019859314, |
| "step": 284 |
| }, |
| { |
| "epoch": 0.285, |
| "grad_norm": 0.8567055463790894, |
| "grad_norm_var": 0.0979149155759875, |
| "learning_rate": 5e-05, |
| "loss": 0.9381, |
| "loss/crossentropy": 3.099311351776123, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3139878064393997, |
| "loss/reg": 0.6241004467010498, |
| "step": 285 |
| }, |
| { |
| "epoch": 0.286, |
| "grad_norm": 0.7930836081504822, |
| "grad_norm_var": 0.09867069764221635, |
| "learning_rate": 5e-05, |
| "loss": 0.9393, |
| "loss/crossentropy": 3.1035256385803223, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.32074499130249023, |
| "loss/reg": 0.6185299158096313, |
| "step": 286 |
| }, |
| { |
| "epoch": 0.287, |
| "grad_norm": 0.7915433049201965, |
| "grad_norm_var": 0.01117600146283356, |
| "learning_rate": 5e-05, |
| "loss": 0.9443, |
| "loss/crossentropy": 3.055205225944519, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3313294053077698, |
| "loss/reg": 0.6129992008209229, |
| "step": 287 |
| }, |
| { |
| "epoch": 0.288, |
| "grad_norm": 0.8326281905174255, |
| "grad_norm_var": 0.009509348738082565, |
| "learning_rate": 5e-05, |
| "loss": 0.9282, |
| "loss/crossentropy": 2.9394391775131226, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.32069140672683716, |
| "loss/reg": 0.6075358390808105, |
| "step": 288 |
| }, |
| { |
| "epoch": 0.289, |
| "grad_norm": 0.9906336069107056, |
| "grad_norm_var": 0.005366609159777491, |
| "learning_rate": 5e-05, |
| "loss": 0.977, |
| "loss/crossentropy": 3.184403419494629, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3747285306453705, |
| "loss/reg": 0.6023111343383789, |
| "step": 289 |
| }, |
| { |
| "epoch": 0.29, |
| "grad_norm": 0.8238855004310608, |
| "grad_norm_var": 0.005201989798596651, |
| "learning_rate": 5e-05, |
| "loss": 0.9005, |
| "loss/crossentropy": 2.908714175224304, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.30404825508594513, |
| "loss/reg": 0.5964736938476562, |
| "step": 290 |
| }, |
| { |
| "epoch": 0.291, |
| "grad_norm": 0.8306154012680054, |
| "grad_norm_var": 0.005237369688792152, |
| "learning_rate": 5e-05, |
| "loss": 0.9089, |
| "loss/crossentropy": 2.975302577018738, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.31827835738658905, |
| "loss/reg": 0.5906208157539368, |
| "step": 291 |
| }, |
| { |
| "epoch": 0.292, |
| "grad_norm": 0.7821725010871887, |
| "grad_norm_var": 0.0055500582962433025, |
| "learning_rate": 5e-05, |
| "loss": 0.8966, |
| "loss/crossentropy": 2.869265556335449, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3118269443511963, |
| "loss/reg": 0.5847451686859131, |
| "step": 292 |
| }, |
| { |
| "epoch": 0.293, |
| "grad_norm": 0.8395736813545227, |
| "grad_norm_var": 0.0054983577732205784, |
| "learning_rate": 5e-05, |
| "loss": 0.9068, |
| "loss/crossentropy": 2.989979863166809, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.32747338712215424, |
| "loss/reg": 0.5792975425720215, |
| "step": 293 |
| }, |
| { |
| "epoch": 0.294, |
| "grad_norm": 0.7706114053726196, |
| "grad_norm_var": 0.002953639663803583, |
| "learning_rate": 5e-05, |
| "loss": 0.8869, |
| "loss/crossentropy": 2.983855366706848, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3133319616317749, |
| "loss/reg": 0.5736033320426941, |
| "step": 294 |
| }, |
| { |
| "epoch": 0.295, |
| "grad_norm": 0.8381783366203308, |
| "grad_norm_var": 0.0028652913391395973, |
| "learning_rate": 5e-05, |
| "loss": 0.8985, |
| "loss/crossentropy": 2.7056565284729004, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.33057354390621185, |
| "loss/reg": 0.5679031014442444, |
| "step": 295 |
| }, |
| { |
| "epoch": 0.296, |
| "grad_norm": 0.8296598792076111, |
| "grad_norm_var": 0.002879358760706907, |
| "learning_rate": 5e-05, |
| "loss": 0.8825, |
| "loss/crossentropy": 2.871171236038208, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3198034018278122, |
| "loss/reg": 0.5626744627952576, |
| "step": 296 |
| }, |
| { |
| "epoch": 0.297, |
| "grad_norm": 0.8649831414222717, |
| "grad_norm_var": 0.0028301385552169803, |
| "learning_rate": 5e-05, |
| "loss": 0.8756, |
| "loss/crossentropy": 2.9602943658828735, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3187420964241028, |
| "loss/reg": 0.5569052696228027, |
| "step": 297 |
| }, |
| { |
| "epoch": 0.298, |
| "grad_norm": 0.8841449022293091, |
| "grad_norm_var": 0.002912297021114189, |
| "learning_rate": 5e-05, |
| "loss": 0.8724, |
| "loss/crossentropy": 3.027226448059082, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.32116903364658356, |
| "loss/reg": 0.5512411594390869, |
| "step": 298 |
| }, |
| { |
| "epoch": 0.299, |
| "grad_norm": 0.8891116380691528, |
| "grad_norm_var": 0.002980161024870102, |
| "learning_rate": 5e-05, |
| "loss": 0.8925, |
| "loss/crossentropy": 2.952752947807312, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.34697093069553375, |
| "loss/reg": 0.5455691814422607, |
| "step": 299 |
| }, |
| { |
| "epoch": 0.3, |
| "grad_norm": 0.842205286026001, |
| "grad_norm_var": 0.0027436977427273577, |
| "learning_rate": 5e-05, |
| "loss": 0.8671, |
| "loss/crossentropy": 2.848607659339905, |
| "loss/hidden": 0.0, |
| "loss/logits": 0.3269532173871994, |
| "loss/reg": 0.540195882320404, |
| "step": 300 |
| } |
| ], |
| "logging_steps": 1, |
| "max_steps": 1000, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 9223372036854775807, |
| "save_steps": 100, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": true, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": false |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 7.72870338772992e+16, |
| "train_batch_size": 4, |
| "trial_name": null, |
| "trial_params": null |
| } |