| { |
| "best_global_step": null, |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 1.0, |
| "eval_steps": 2000, |
| "global_step": 10000, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.001, |
| "grad_norm": 4736.0, |
| "learning_rate": 1.9e-05, |
| "loss": 132.1055, |
| "loss/crossentropy": 12.246079635620116, |
| "loss/hidden": 18.7125, |
| "loss/jsd": 0.0, |
| "loss/logits": 10.372939014434815, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.002, |
| "grad_norm": 330.0, |
| "grad_norm_var": 91640269.18333334, |
| "learning_rate": 2.8000000000000003e-05, |
| "loss": 95.9731, |
| "loss/crossentropy": 8.862393474578857, |
| "loss/hidden": 18.675, |
| "loss/jsd": 0.0, |
| "loss/logits": 6.677179157733917, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.003, |
| "grad_norm": 394.0, |
| "grad_norm_var": 237715.45, |
| "learning_rate": 3.7e-05, |
| "loss": 86.3778, |
| "loss/crossentropy": 8.083840227127075, |
| "loss/hidden": 18.259375, |
| "loss/jsd": 0.0, |
| "loss/logits": 6.130921971797943, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.004, |
| "grad_norm": 924.0, |
| "grad_norm_var": 2.6757682503402172e+16, |
| "learning_rate": 4.600000000000001e-05, |
| "loss": 82.5914, |
| "loss/crossentropy": 7.802511918544769, |
| "loss/hidden": 17.440625, |
| "loss/jsd": 0.0, |
| "loss/logits": 5.772503018379211, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.005, |
| "grad_norm": 516.0, |
| "grad_norm_var": 38597.583333333336, |
| "learning_rate": 5.500000000000001e-05, |
| "loss": 75.3397, |
| "loss/crossentropy": 7.156700026988983, |
| "loss/hidden": 17.253125, |
| "loss/jsd": 0.0, |
| "loss/logits": 5.156575608253479, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.006, |
| "grad_norm": 1232.0, |
| "grad_norm_var": 68241.45, |
| "learning_rate": 6.400000000000001e-05, |
| "loss": 61.2745, |
| "loss/crossentropy": 6.0138510942459105, |
| "loss/hidden": 15.80625, |
| "loss/jsd": 0.0, |
| "loss/logits": 3.8037488579750063, |
| "step": 60 |
| }, |
| { |
| "epoch": 0.007, |
| "grad_norm": 376.0, |
| "grad_norm_var": 626103.4, |
| "learning_rate": 7.3e-05, |
| "loss": 41.3695, |
| "loss/crossentropy": 4.422797441482544, |
| "loss/hidden": 13.1125, |
| "loss/jsd": 0.0, |
| "loss/logits": 2.4006322652101515, |
| "step": 70 |
| }, |
| { |
| "epoch": 0.008, |
| "grad_norm": 272.0, |
| "grad_norm_var": 674923.45, |
| "learning_rate": 8.200000000000001e-05, |
| "loss": 27.4755, |
| "loss/crossentropy": 3.3576226443052293, |
| "loss/hidden": 10.7359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 1.3968962401151657, |
| "step": 80 |
| }, |
| { |
| "epoch": 0.009, |
| "grad_norm": 296.0, |
| "grad_norm_var": 15426.383333333333, |
| "learning_rate": 9.1e-05, |
| "loss": 22.6607, |
| "loss/crossentropy": 3.217679074406624, |
| "loss/hidden": 9.2140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 1.055714099109173, |
| "step": 90 |
| }, |
| { |
| "epoch": 0.01, |
| "grad_norm": 328.0, |
| "grad_norm_var": 9349.666666666666, |
| "learning_rate": 0.0001, |
| "loss": 20.3108, |
| "loss/crossentropy": 2.934060016274452, |
| "loss/hidden": 8.40703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.8702833190560341, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.011, |
| "grad_norm": 194.0, |
| "grad_norm_var": 5992.866666666667, |
| "learning_rate": 0.0001, |
| "loss": 18.8852, |
| "loss/crossentropy": 2.8450062334537507, |
| "loss/hidden": 8.221875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.8380498677492142, |
| "step": 110 |
| }, |
| { |
| "epoch": 0.012, |
| "grad_norm": 244.0, |
| "grad_norm_var": 1176.5333333333333, |
| "learning_rate": 0.0001, |
| "loss": 17.97, |
| "loss/crossentropy": 2.612249107658863, |
| "loss/hidden": 7.578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.686215291172266, |
| "step": 120 |
| }, |
| { |
| "epoch": 0.013, |
| "grad_norm": 242.0, |
| "grad_norm_var": 1168.8958333333333, |
| "learning_rate": 0.0001, |
| "loss": 17.2904, |
| "loss/crossentropy": 2.8242316216230394, |
| "loss/hidden": 7.7390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.7805894792079926, |
| "step": 130 |
| }, |
| { |
| "epoch": 0.014, |
| "grad_norm": 179.0, |
| "grad_norm_var": 1465.1333333333334, |
| "learning_rate": 0.0001, |
| "loss": 16.5581, |
| "loss/crossentropy": 2.737143725156784, |
| "loss/hidden": 7.3421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.6888546235859394, |
| "step": 140 |
| }, |
| { |
| "epoch": 0.015, |
| "grad_norm": 175.0, |
| "grad_norm_var": 1119.8625, |
| "learning_rate": 0.0001, |
| "loss": 16.0501, |
| "loss/crossentropy": 2.7599751561880113, |
| "loss/hidden": 7.05703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.6640767879784107, |
| "step": 150 |
| }, |
| { |
| "epoch": 0.016, |
| "grad_norm": 186.0, |
| "grad_norm_var": 1044.5166666666667, |
| "learning_rate": 0.0001, |
| "loss": 15.4631, |
| "loss/crossentropy": 2.6100075274705885, |
| "loss/hidden": 6.8203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.5824844464659691, |
| "step": 160 |
| }, |
| { |
| "epoch": 0.017, |
| "grad_norm": 179.0, |
| "grad_norm_var": 1082.8, |
| "learning_rate": 0.0001, |
| "loss": 15.2201, |
| "loss/crossentropy": 2.4276285111904143, |
| "loss/hidden": 6.8203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.5915141828358174, |
| "step": 170 |
| }, |
| { |
| "epoch": 0.018, |
| "grad_norm": 153.0, |
| "grad_norm_var": 622.6625, |
| "learning_rate": 0.0001, |
| "loss": 14.9606, |
| "loss/crossentropy": 2.630460512638092, |
| "loss/hidden": 6.52578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.5396774187684059, |
| "step": 180 |
| }, |
| { |
| "epoch": 0.019, |
| "grad_norm": 176.0, |
| "grad_norm_var": 1093.2, |
| "learning_rate": 0.0001, |
| "loss": 14.6255, |
| "loss/crossentropy": 2.3158223152160646, |
| "loss/hidden": 6.50390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.4905257746577263, |
| "step": 190 |
| }, |
| { |
| "epoch": 0.02, |
| "grad_norm": 112.0, |
| "grad_norm_var": 695.7291666666666, |
| "learning_rate": 0.0001, |
| "loss": 14.3647, |
| "loss/crossentropy": 2.586851382255554, |
| "loss/hidden": 6.42265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.5586091712117195, |
| "step": 200 |
| }, |
| { |
| "epoch": 0.021, |
| "grad_norm": 118.5, |
| "grad_norm_var": 574.3072916666666, |
| "learning_rate": 0.0001, |
| "loss": 14.0867, |
| "loss/crossentropy": 2.5010055124759676, |
| "loss/hidden": 6.34453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.4965482771396637, |
| "step": 210 |
| }, |
| { |
| "epoch": 0.022, |
| "grad_norm": 88.5, |
| "grad_norm_var": 662.65, |
| "learning_rate": 0.0001, |
| "loss": 13.6551, |
| "loss/crossentropy": 2.573444625735283, |
| "loss/hidden": 6.33125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.5534068010747433, |
| "step": 220 |
| }, |
| { |
| "epoch": 0.023, |
| "grad_norm": 118.0, |
| "grad_norm_var": 412.1958333333333, |
| "learning_rate": 0.0001, |
| "loss": 13.4715, |
| "loss/crossentropy": 2.4142292886972427, |
| "loss/hidden": 5.96640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.44360905699431896, |
| "step": 230 |
| }, |
| { |
| "epoch": 0.024, |
| "grad_norm": 134.0, |
| "grad_norm_var": 242.9, |
| "learning_rate": 0.0001, |
| "loss": 13.3289, |
| "loss/crossentropy": 2.4670142769813537, |
| "loss/hidden": 5.98671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.47392544001340864, |
| "step": 240 |
| }, |
| { |
| "epoch": 0.025, |
| "grad_norm": 137.0, |
| "grad_norm_var": 158.4625, |
| "learning_rate": 0.0001, |
| "loss": 13.0031, |
| "loss/crossentropy": 2.416000656783581, |
| "loss/hidden": 5.7859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.44607544504106045, |
| "step": 250 |
| }, |
| { |
| "epoch": 0.026, |
| "grad_norm": 109.0, |
| "grad_norm_var": 279.990625, |
| "learning_rate": 0.0001, |
| "loss": 13.0076, |
| "loss/crossentropy": 2.370332670211792, |
| "loss/hidden": 5.9984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.5006627842783928, |
| "step": 260 |
| }, |
| { |
| "epoch": 0.027, |
| "grad_norm": 129.0, |
| "grad_norm_var": 427.37395833333335, |
| "learning_rate": 0.0001, |
| "loss": 12.8809, |
| "loss/crossentropy": 2.281908763945103, |
| "loss/hidden": 5.98671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.45061586182564495, |
| "step": 270 |
| }, |
| { |
| "epoch": 0.028, |
| "grad_norm": 98.0, |
| "grad_norm_var": 278.1489583333333, |
| "learning_rate": 0.0001, |
| "loss": 12.8942, |
| "loss/crossentropy": 2.3922384053468706, |
| "loss/hidden": 5.6984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.44376694336533545, |
| "step": 280 |
| }, |
| { |
| "epoch": 0.029, |
| "grad_norm": 99.5, |
| "grad_norm_var": 303.55, |
| "learning_rate": 0.0001, |
| "loss": 12.7122, |
| "loss/crossentropy": 2.730095013976097, |
| "loss/hidden": 5.49140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.4411045670509338, |
| "step": 290 |
| }, |
| { |
| "epoch": 0.03, |
| "grad_norm": 112.5, |
| "grad_norm_var": 359.56666666666666, |
| "learning_rate": 0.0001, |
| "loss": 12.5618, |
| "loss/crossentropy": 2.3741705983877184, |
| "loss/hidden": 5.43203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.40091707594692705, |
| "step": 300 |
| }, |
| { |
| "epoch": 0.031, |
| "grad_norm": 84.5, |
| "grad_norm_var": 245.25729166666667, |
| "learning_rate": 0.0001, |
| "loss": 12.2525, |
| "loss/crossentropy": 2.2781229317188263, |
| "loss/hidden": 5.53515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.4274128321558237, |
| "step": 310 |
| }, |
| { |
| "epoch": 0.032, |
| "grad_norm": 108.5, |
| "grad_norm_var": 140.59583333333333, |
| "learning_rate": 0.0001, |
| "loss": 12.2935, |
| "loss/crossentropy": 2.5757294684648513, |
| "loss/hidden": 5.4609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.42916890494525434, |
| "step": 320 |
| }, |
| { |
| "epoch": 0.033, |
| "grad_norm": 108.0, |
| "grad_norm_var": 70.89895833333334, |
| "learning_rate": 0.0001, |
| "loss": 12.1545, |
| "loss/crossentropy": 2.527638339996338, |
| "loss/hidden": 5.378125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.4032053742557764, |
| "step": 330 |
| }, |
| { |
| "epoch": 0.034, |
| "grad_norm": 210.0, |
| "grad_norm_var": 1272.465625, |
| "learning_rate": 0.0001, |
| "loss": 12.2482, |
| "loss/crossentropy": 2.5401821002364158, |
| "loss/hidden": 5.390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.4444709587842226, |
| "step": 340 |
| }, |
| { |
| "epoch": 0.035, |
| "grad_norm": 79.5, |
| "grad_norm_var": 1376.5958333333333, |
| "learning_rate": 0.0001, |
| "loss": 12.08, |
| "loss/crossentropy": 2.514840933680534, |
| "loss/hidden": 5.2640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.4077944982796907, |
| "step": 350 |
| }, |
| { |
| "epoch": 0.036, |
| "grad_norm": 87.0, |
| "grad_norm_var": 418.83229166666666, |
| "learning_rate": 0.0001, |
| "loss": 12.0245, |
| "loss/crossentropy": 2.420889538526535, |
| "loss/hidden": 5.34921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.44222328886389733, |
| "step": 360 |
| }, |
| { |
| "epoch": 0.037, |
| "grad_norm": 76.5, |
| "grad_norm_var": 138.5625, |
| "learning_rate": 0.0001, |
| "loss": 11.7097, |
| "loss/crossentropy": 2.2826619133353234, |
| "loss/hidden": 5.3296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3849468305706978, |
| "step": 370 |
| }, |
| { |
| "epoch": 0.038, |
| "grad_norm": 96.5, |
| "grad_norm_var": 184.93229166666666, |
| "learning_rate": 0.0001, |
| "loss": 11.465, |
| "loss/crossentropy": 2.4052042722702027, |
| "loss/hidden": 5.16796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.40173302926123144, |
| "step": 380 |
| }, |
| { |
| "epoch": 0.039, |
| "grad_norm": 125.5, |
| "grad_norm_var": 183.09583333333333, |
| "learning_rate": 0.0001, |
| "loss": 11.6273, |
| "loss/crossentropy": 2.540145033597946, |
| "loss/hidden": 5.215625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.41224894523620603, |
| "step": 390 |
| }, |
| { |
| "epoch": 0.04, |
| "grad_norm": 83.5, |
| "grad_norm_var": 258.315625, |
| "learning_rate": 0.0001, |
| "loss": 11.397, |
| "loss/crossentropy": 2.207468980550766, |
| "loss/hidden": 5.09296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3590874429792166, |
| "step": 400 |
| }, |
| { |
| "epoch": 0.041, |
| "grad_norm": 94.5, |
| "grad_norm_var": 184.5625, |
| "learning_rate": 0.0001, |
| "loss": 11.443, |
| "loss/crossentropy": 2.4378984421491623, |
| "loss/hidden": 5.21171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.40493359677493573, |
| "step": 410 |
| }, |
| { |
| "epoch": 0.042, |
| "grad_norm": 106.5, |
| "grad_norm_var": 125.590625, |
| "learning_rate": 0.0001, |
| "loss": 11.5678, |
| "loss/crossentropy": 2.518555220961571, |
| "loss/hidden": 5.07265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.4297170080244541, |
| "step": 420 |
| }, |
| { |
| "epoch": 0.043, |
| "grad_norm": 87.5, |
| "grad_norm_var": 115.765625, |
| "learning_rate": 0.0001, |
| "loss": 11.3132, |
| "loss/crossentropy": 2.490597203373909, |
| "loss/hidden": 5.11171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.403754598274827, |
| "step": 430 |
| }, |
| { |
| "epoch": 0.044, |
| "grad_norm": 92.5, |
| "grad_norm_var": 156.35729166666667, |
| "learning_rate": 0.0001, |
| "loss": 11.1476, |
| "loss/crossentropy": 2.037529316544533, |
| "loss/hidden": 5.07421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.35246654506772757, |
| "step": 440 |
| }, |
| { |
| "epoch": 0.045, |
| "grad_norm": 80.5, |
| "grad_norm_var": 210.66666666666666, |
| "learning_rate": 0.0001, |
| "loss": 11.3038, |
| "loss/crossentropy": 2.3201738983392715, |
| "loss/hidden": 5.0828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.38196625709533694, |
| "step": 450 |
| }, |
| { |
| "epoch": 0.046, |
| "grad_norm": 107.5, |
| "grad_norm_var": 284.1666666666667, |
| "learning_rate": 0.0001, |
| "loss": 11.3625, |
| "loss/crossentropy": 2.4791718110442162, |
| "loss/hidden": 4.95546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.36495909169316293, |
| "step": 460 |
| }, |
| { |
| "epoch": 0.047, |
| "grad_norm": 91.5, |
| "grad_norm_var": 247.39895833333333, |
| "learning_rate": 0.0001, |
| "loss": 11.0542, |
| "loss/crossentropy": 2.3155667960643767, |
| "loss/hidden": 4.93828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.362844867631793, |
| "step": 470 |
| }, |
| { |
| "epoch": 0.048, |
| "grad_norm": 95.0, |
| "grad_norm_var": 194.79895833333333, |
| "learning_rate": 0.0001, |
| "loss": 11.2413, |
| "loss/crossentropy": 2.496318203210831, |
| "loss/hidden": 4.840625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3887303464114666, |
| "step": 480 |
| }, |
| { |
| "epoch": 0.049, |
| "grad_norm": 74.5, |
| "grad_norm_var": 243.840625, |
| "learning_rate": 0.0001, |
| "loss": 10.9416, |
| "loss/crossentropy": 2.385223904252052, |
| "loss/hidden": 4.85234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3598880790174007, |
| "step": 490 |
| }, |
| { |
| "epoch": 0.05, |
| "grad_norm": 79.0, |
| "grad_norm_var": 105.990625, |
| "learning_rate": 0.0001, |
| "loss": 10.9114, |
| "loss/crossentropy": 2.2462552055716514, |
| "loss/hidden": 4.80859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3265662036836147, |
| "step": 500 |
| }, |
| { |
| "epoch": 0.051, |
| "grad_norm": 96.5, |
| "grad_norm_var": 138.43229166666666, |
| "learning_rate": 0.0001, |
| "loss": 10.8821, |
| "loss/crossentropy": 2.297148121893406, |
| "loss/hidden": 4.8609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3467547960579395, |
| "step": 510 |
| }, |
| { |
| "epoch": 0.052, |
| "grad_norm": 97.5, |
| "grad_norm_var": 129.365625, |
| "learning_rate": 0.0001, |
| "loss": 10.9299, |
| "loss/crossentropy": 2.4197026968002318, |
| "loss/hidden": 4.7921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3632193084806204, |
| "step": 520 |
| }, |
| { |
| "epoch": 0.053, |
| "grad_norm": 81.5, |
| "grad_norm_var": 99.47395833333333, |
| "learning_rate": 0.0001, |
| "loss": 10.787, |
| "loss/crossentropy": 2.36982424557209, |
| "loss/hidden": 4.825, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3405680742114782, |
| "step": 530 |
| }, |
| { |
| "epoch": 0.054, |
| "grad_norm": 85.5, |
| "grad_norm_var": 48.340625, |
| "learning_rate": 0.0001, |
| "loss": 10.8675, |
| "loss/crossentropy": 2.4611779801547526, |
| "loss/hidden": 4.8625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.36872007288038733, |
| "step": 540 |
| }, |
| { |
| "epoch": 0.055, |
| "grad_norm": 93.5, |
| "grad_norm_var": 84.24895833333333, |
| "learning_rate": 0.0001, |
| "loss": 10.64, |
| "loss/crossentropy": 2.1758567959070207, |
| "loss/hidden": 4.7484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3336840860545635, |
| "step": 550 |
| }, |
| { |
| "epoch": 0.056, |
| "grad_norm": 114.0, |
| "grad_norm_var": 129.53098958333334, |
| "learning_rate": 0.0001, |
| "loss": 10.5615, |
| "loss/crossentropy": 2.3970536097884176, |
| "loss/hidden": 4.7625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.34276723079383375, |
| "step": 560 |
| }, |
| { |
| "epoch": 0.057, |
| "grad_norm": 80.0, |
| "grad_norm_var": 579.57890625, |
| "learning_rate": 0.0001, |
| "loss": 10.8999, |
| "loss/crossentropy": 2.4695185527205465, |
| "loss/hidden": 4.9453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.42829814068973066, |
| "step": 570 |
| }, |
| { |
| "epoch": 0.058, |
| "grad_norm": 85.0, |
| "grad_norm_var": 596.9572916666667, |
| "learning_rate": 0.0001, |
| "loss": 10.8802, |
| "loss/crossentropy": 2.3520184576511385, |
| "loss/hidden": 4.790625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3662864986807108, |
| "step": 580 |
| }, |
| { |
| "epoch": 0.059, |
| "grad_norm": 73.0, |
| "grad_norm_var": 181.69583333333333, |
| "learning_rate": 0.0001, |
| "loss": 10.6744, |
| "loss/crossentropy": 2.2842736929655074, |
| "loss/hidden": 4.71484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3500846643000841, |
| "step": 590 |
| }, |
| { |
| "epoch": 0.06, |
| "grad_norm": 97.0, |
| "grad_norm_var": 160.58307291666668, |
| "learning_rate": 0.0001, |
| "loss": 10.6987, |
| "loss/crossentropy": 2.29906165599823, |
| "loss/hidden": 4.602734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.334361494705081, |
| "step": 600 |
| }, |
| { |
| "epoch": 0.061, |
| "grad_norm": 89.0, |
| "grad_norm_var": 162.67682291666668, |
| "learning_rate": 0.0001, |
| "loss": 10.6143, |
| "loss/crossentropy": 2.3032930195331573, |
| "loss/hidden": 4.6703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3258141163736582, |
| "step": 610 |
| }, |
| { |
| "epoch": 0.062, |
| "grad_norm": 77.5, |
| "grad_norm_var": 97.12916666666666, |
| "learning_rate": 0.0001, |
| "loss": 10.5946, |
| "loss/crossentropy": 2.452244046330452, |
| "loss/hidden": 4.7109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3432691916823387, |
| "step": 620 |
| }, |
| { |
| "epoch": 0.063, |
| "grad_norm": 75.5, |
| "grad_norm_var": 227.69973958333333, |
| "learning_rate": 0.0001, |
| "loss": 10.6287, |
| "loss/crossentropy": 2.2894835874438284, |
| "loss/hidden": 4.74609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.35672005768865345, |
| "step": 630 |
| }, |
| { |
| "epoch": 0.064, |
| "grad_norm": 70.0, |
| "grad_norm_var": 541.2322916666667, |
| "learning_rate": 0.0001, |
| "loss": 10.6195, |
| "loss/crossentropy": 2.4114772886037827, |
| "loss/hidden": 4.70546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.35591375902295114, |
| "step": 640 |
| }, |
| { |
| "epoch": 0.065, |
| "grad_norm": 77.0, |
| "grad_norm_var": 435.15390625, |
| "learning_rate": 0.0001, |
| "loss": 10.4142, |
| "loss/crossentropy": 2.332440134882927, |
| "loss/hidden": 4.634375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.339809150993824, |
| "step": 650 |
| }, |
| { |
| "epoch": 0.066, |
| "grad_norm": 71.5, |
| "grad_norm_var": 118.03307291666667, |
| "learning_rate": 0.0001, |
| "loss": 10.4602, |
| "loss/crossentropy": 2.154422373324633, |
| "loss/hidden": 4.54140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3334257358685136, |
| "step": 660 |
| }, |
| { |
| "epoch": 0.067, |
| "grad_norm": 73.5, |
| "grad_norm_var": 144.94166666666666, |
| "learning_rate": 0.0001, |
| "loss": 10.5185, |
| "loss/crossentropy": 2.3223402693867685, |
| "loss/hidden": 4.795703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.37188967503607273, |
| "step": 670 |
| }, |
| { |
| "epoch": 0.068, |
| "grad_norm": 61.5, |
| "grad_norm_var": 169.65598958333334, |
| "learning_rate": 0.0001, |
| "loss": 10.5323, |
| "loss/crossentropy": 2.332353001832962, |
| "loss/hidden": 4.50625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31948004066944125, |
| "step": 680 |
| }, |
| { |
| "epoch": 0.069, |
| "grad_norm": 74.0, |
| "grad_norm_var": 155.94140625, |
| "learning_rate": 0.0001, |
| "loss": 10.4359, |
| "loss/crossentropy": 2.4077556908130644, |
| "loss/hidden": 4.623828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.339173823595047, |
| "step": 690 |
| }, |
| { |
| "epoch": 0.07, |
| "grad_norm": 82.5, |
| "grad_norm_var": 125.55416666666666, |
| "learning_rate": 0.0001, |
| "loss": 10.4493, |
| "loss/crossentropy": 2.292634981870651, |
| "loss/hidden": 4.571875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3477486100047827, |
| "step": 700 |
| }, |
| { |
| "epoch": 0.071, |
| "grad_norm": 88.0, |
| "grad_norm_var": 155.84166666666667, |
| "learning_rate": 0.0001, |
| "loss": 10.2041, |
| "loss/crossentropy": 2.4034020826220512, |
| "loss/hidden": 4.53046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3406600248068571, |
| "step": 710 |
| }, |
| { |
| "epoch": 0.072, |
| "grad_norm": 124.0, |
| "grad_norm_var": 230.83307291666668, |
| "learning_rate": 0.0001, |
| "loss": 10.3489, |
| "loss/crossentropy": 2.333241228759289, |
| "loss/hidden": 4.6015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3285223826766014, |
| "step": 720 |
| }, |
| { |
| "epoch": 0.073, |
| "grad_norm": 71.0, |
| "grad_norm_var": 278.95390625, |
| "learning_rate": 0.0001, |
| "loss": 10.1548, |
| "loss/crossentropy": 2.4066421508789064, |
| "loss/hidden": 4.682421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.338771004602313, |
| "step": 730 |
| }, |
| { |
| "epoch": 0.074, |
| "grad_norm": 84.5, |
| "grad_norm_var": 166.85729166666667, |
| "learning_rate": 0.0001, |
| "loss": 10.2647, |
| "loss/crossentropy": 2.2724754482507707, |
| "loss/hidden": 4.567578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3267147310078144, |
| "step": 740 |
| }, |
| { |
| "epoch": 0.075, |
| "grad_norm": 67.5, |
| "grad_norm_var": 343.5247395833333, |
| "learning_rate": 0.0001, |
| "loss": 10.2815, |
| "loss/crossentropy": 2.3046080738306047, |
| "loss/hidden": 4.473828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.33236319161951544, |
| "step": 750 |
| }, |
| { |
| "epoch": 0.076, |
| "grad_norm": 68.5, |
| "grad_norm_var": 306.540625, |
| "learning_rate": 0.0001, |
| "loss": 10.2479, |
| "loss/crossentropy": 2.2831736013293265, |
| "loss/hidden": 4.62734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3329113606363535, |
| "step": 760 |
| }, |
| { |
| "epoch": 0.077, |
| "grad_norm": 88.5, |
| "grad_norm_var": 111.57473958333334, |
| "learning_rate": 0.0001, |
| "loss": 10.2161, |
| "loss/crossentropy": 2.3853780582547186, |
| "loss/hidden": 4.541015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31959532871842383, |
| "step": 770 |
| }, |
| { |
| "epoch": 0.078, |
| "grad_norm": 80.5, |
| "grad_norm_var": 110.65729166666667, |
| "learning_rate": 0.0001, |
| "loss": 10.2076, |
| "loss/crossentropy": 2.3982744574546815, |
| "loss/hidden": 4.55859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3542841043323278, |
| "step": 780 |
| }, |
| { |
| "epoch": 0.079, |
| "grad_norm": 66.0, |
| "grad_norm_var": 275.6322916666667, |
| "learning_rate": 0.0001, |
| "loss": 10.1697, |
| "loss/crossentropy": 2.4292824655771255, |
| "loss/hidden": 4.632421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.36711033545434474, |
| "step": 790 |
| }, |
| { |
| "epoch": 0.08, |
| "grad_norm": 57.25, |
| "grad_norm_var": 290.9291666666667, |
| "learning_rate": 0.0001, |
| "loss": 10.2176, |
| "loss/crossentropy": 2.380542576313019, |
| "loss/hidden": 4.509375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3368827097117901, |
| "step": 800 |
| }, |
| { |
| "epoch": 0.081, |
| "grad_norm": 60.75, |
| "grad_norm_var": 52.67916666666667, |
| "learning_rate": 0.0001, |
| "loss": 10.2311, |
| "loss/crossentropy": 2.4212940514087675, |
| "loss/hidden": 4.55546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.355662290379405, |
| "step": 810 |
| }, |
| { |
| "epoch": 0.082, |
| "grad_norm": 60.75, |
| "grad_norm_var": 65.81666666666666, |
| "learning_rate": 0.0001, |
| "loss": 10.1866, |
| "loss/crossentropy": 2.4809795886278154, |
| "loss/hidden": 4.49140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3553234666585922, |
| "step": 820 |
| }, |
| { |
| "epoch": 0.083, |
| "grad_norm": 56.5, |
| "grad_norm_var": 98.2875, |
| "learning_rate": 0.0001, |
| "loss": 9.9805, |
| "loss/crossentropy": 2.306653854250908, |
| "loss/hidden": 4.40078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3146494958549738, |
| "step": 830 |
| }, |
| { |
| "epoch": 0.084, |
| "grad_norm": 68.0, |
| "grad_norm_var": 38.51015625, |
| "learning_rate": 0.0001, |
| "loss": 10.1087, |
| "loss/crossentropy": 2.250006601214409, |
| "loss/hidden": 4.422265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.30200174674391744, |
| "step": 840 |
| }, |
| { |
| "epoch": 0.085, |
| "grad_norm": 70.5, |
| "grad_norm_var": 43.483072916666664, |
| "learning_rate": 0.0001, |
| "loss": 10.0526, |
| "loss/crossentropy": 2.211633677780628, |
| "loss/hidden": 4.47421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31178686060011385, |
| "step": 850 |
| }, |
| { |
| "epoch": 0.086, |
| "grad_norm": 61.0, |
| "grad_norm_var": 41.545572916666664, |
| "learning_rate": 0.0001, |
| "loss": 10.1915, |
| "loss/crossentropy": 2.5281356513500213, |
| "loss/hidden": 4.389453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.34625968635082244, |
| "step": 860 |
| }, |
| { |
| "epoch": 0.087, |
| "grad_norm": 72.5, |
| "grad_norm_var": 54.475, |
| "learning_rate": 0.0001, |
| "loss": 10.0007, |
| "loss/crossentropy": 2.4020907685160635, |
| "loss/hidden": 4.326171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.32252500094473363, |
| "step": 870 |
| }, |
| { |
| "epoch": 0.088, |
| "grad_norm": 142.0, |
| "grad_norm_var": 499.1375, |
| "learning_rate": 0.0001, |
| "loss": 9.99, |
| "loss/crossentropy": 2.384984764456749, |
| "loss/hidden": 4.38515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3189360786229372, |
| "step": 880 |
| }, |
| { |
| "epoch": 0.089, |
| "grad_norm": 57.75, |
| "grad_norm_var": 527.23515625, |
| "learning_rate": 0.0001, |
| "loss": 9.9879, |
| "loss/crossentropy": 2.3401281625032424, |
| "loss/hidden": 4.46328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3382201848551631, |
| "step": 890 |
| }, |
| { |
| "epoch": 0.09, |
| "grad_norm": 71.5, |
| "grad_norm_var": 95.97265625, |
| "learning_rate": 0.0001, |
| "loss": 9.9352, |
| "loss/crossentropy": 2.3969784706830977, |
| "loss/hidden": 4.384375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.336395762488246, |
| "step": 900 |
| }, |
| { |
| "epoch": 0.091, |
| "grad_norm": 79.0, |
| "grad_norm_var": 144.96666666666667, |
| "learning_rate": 0.0001, |
| "loss": 10.149, |
| "loss/crossentropy": 2.4599110893905163, |
| "loss/hidden": 4.30703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3240171395242214, |
| "step": 910 |
| }, |
| { |
| "epoch": 0.092, |
| "grad_norm": 65.5, |
| "grad_norm_var": 119.2375, |
| "learning_rate": 0.0001, |
| "loss": 9.9634, |
| "loss/crossentropy": 2.4210876494646074, |
| "loss/hidden": 4.30390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.32166178375482557, |
| "step": 920 |
| }, |
| { |
| "epoch": 0.093, |
| "grad_norm": 63.0, |
| "grad_norm_var": 41.47083333333333, |
| "learning_rate": 0.0001, |
| "loss": 9.744, |
| "loss/crossentropy": 2.2256636448204516, |
| "loss/hidden": 4.284765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29795306362211704, |
| "step": 930 |
| }, |
| { |
| "epoch": 0.094, |
| "grad_norm": 53.5, |
| "grad_norm_var": 192.55807291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.8636, |
| "loss/crossentropy": 2.297808923572302, |
| "loss/hidden": 4.31640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.30742434673011304, |
| "step": 940 |
| }, |
| { |
| "epoch": 0.095, |
| "grad_norm": 61.0, |
| "grad_norm_var": 81.95729166666666, |
| "learning_rate": 0.0001, |
| "loss": 9.798, |
| "loss/crossentropy": 2.3219059616327287, |
| "loss/hidden": 4.211328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.30037002861499784, |
| "step": 950 |
| }, |
| { |
| "epoch": 0.096, |
| "grad_norm": 56.75, |
| "grad_norm_var": 61.55807291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.7449, |
| "loss/crossentropy": 2.3104363679885864, |
| "loss/hidden": 4.388671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.327311984449625, |
| "step": 960 |
| }, |
| { |
| "epoch": 0.097, |
| "grad_norm": 60.0, |
| "grad_norm_var": 56.18932291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.9668, |
| "loss/crossentropy": 2.308886554837227, |
| "loss/hidden": 4.407421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3183224782347679, |
| "step": 970 |
| }, |
| { |
| "epoch": 0.098, |
| "grad_norm": 66.5, |
| "grad_norm_var": 42.05416666666667, |
| "learning_rate": 0.0001, |
| "loss": 9.7807, |
| "loss/crossentropy": 2.3363482102751734, |
| "loss/hidden": 4.2921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3384779039770365, |
| "step": 980 |
| }, |
| { |
| "epoch": 0.099, |
| "grad_norm": 57.25, |
| "grad_norm_var": 56.891666666666666, |
| "learning_rate": 0.0001, |
| "loss": 9.7501, |
| "loss/crossentropy": 2.1767295479774473, |
| "loss/hidden": 4.466015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31410733237862587, |
| "step": 990 |
| }, |
| { |
| "epoch": 0.1, |
| "grad_norm": 50.25, |
| "grad_norm_var": 75.85598958333334, |
| "learning_rate": 0.0001, |
| "loss": 9.9273, |
| "loss/crossentropy": 2.505411845445633, |
| "loss/hidden": 4.36015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.33212706074118614, |
| "step": 1000 |
| }, |
| { |
| "epoch": 0.101, |
| "grad_norm": 77.5, |
| "grad_norm_var": 196.21848958333334, |
| "learning_rate": 0.0001, |
| "loss": 9.9237, |
| "loss/crossentropy": 2.3281257838010787, |
| "loss/hidden": 4.35546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.32293859515339135, |
| "step": 1010 |
| }, |
| { |
| "epoch": 0.102, |
| "grad_norm": 63.25, |
| "grad_norm_var": 167.42395833333333, |
| "learning_rate": 0.0001, |
| "loss": 9.7592, |
| "loss/crossentropy": 2.3165650010108947, |
| "loss/hidden": 4.32890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31759811006486416, |
| "step": 1020 |
| }, |
| { |
| "epoch": 0.103, |
| "grad_norm": 60.0, |
| "grad_norm_var": 153.80833333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.7366, |
| "loss/crossentropy": 2.3203016728162766, |
| "loss/hidden": 4.28515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31944827549159527, |
| "step": 1030 |
| }, |
| { |
| "epoch": 0.104, |
| "grad_norm": 66.5, |
| "grad_norm_var": 3319.3958333333335, |
| "learning_rate": 0.0001, |
| "loss": 10.0035, |
| "loss/crossentropy": 2.4188640087842943, |
| "loss/hidden": 4.38828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3581279247999191, |
| "step": 1040 |
| }, |
| { |
| "epoch": 0.105, |
| "grad_norm": 60.25, |
| "grad_norm_var": 3338.31640625, |
| "learning_rate": 0.0001, |
| "loss": 9.6837, |
| "loss/crossentropy": 2.2860016629099844, |
| "loss/hidden": 4.325390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.318701284006238, |
| "step": 1050 |
| }, |
| { |
| "epoch": 0.106, |
| "grad_norm": 74.0, |
| "grad_norm_var": 112.4, |
| "learning_rate": 0.0001, |
| "loss": 9.517, |
| "loss/crossentropy": 2.4143033266067504, |
| "loss/hidden": 4.319140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3072842717170715, |
| "step": 1060 |
| }, |
| { |
| "epoch": 0.107, |
| "grad_norm": 71.5, |
| "grad_norm_var": 68.60598958333334, |
| "learning_rate": 0.0001, |
| "loss": 9.8549, |
| "loss/crossentropy": 2.351083371043205, |
| "loss/hidden": 4.398046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.33429058492183683, |
| "step": 1070 |
| }, |
| { |
| "epoch": 0.108, |
| "grad_norm": 53.25, |
| "grad_norm_var": 43.83229166666667, |
| "learning_rate": 0.0001, |
| "loss": 9.7738, |
| "loss/crossentropy": 2.4011227190494537, |
| "loss/hidden": 4.29453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3128178097307682, |
| "step": 1080 |
| }, |
| { |
| "epoch": 0.109, |
| "grad_norm": 72.0, |
| "grad_norm_var": 34.82890625, |
| "learning_rate": 0.0001, |
| "loss": 9.7432, |
| "loss/crossentropy": 2.310031126439571, |
| "loss/hidden": 4.38984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3273486144840717, |
| "step": 1090 |
| }, |
| { |
| "epoch": 0.11, |
| "grad_norm": 66.5, |
| "grad_norm_var": 111.64895833333334, |
| "learning_rate": 0.0001, |
| "loss": 9.6743, |
| "loss/crossentropy": 2.3055127263069153, |
| "loss/hidden": 4.21796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.32233874313533306, |
| "step": 1100 |
| }, |
| { |
| "epoch": 0.111, |
| "grad_norm": 51.5, |
| "grad_norm_var": 46.70729166666667, |
| "learning_rate": 0.0001, |
| "loss": 9.8026, |
| "loss/crossentropy": 2.314373381435871, |
| "loss/hidden": 4.256640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3083756107836962, |
| "step": 1110 |
| }, |
| { |
| "epoch": 0.112, |
| "grad_norm": 57.75, |
| "grad_norm_var": 7292.4375, |
| "learning_rate": 0.0001, |
| "loss": 9.7291, |
| "loss/crossentropy": 2.5138203650712967, |
| "loss/hidden": 4.19921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.30809955932199956, |
| "step": 1120 |
| }, |
| { |
| "epoch": 0.113, |
| "grad_norm": 56.5, |
| "grad_norm_var": 29.190625, |
| "learning_rate": 0.0001, |
| "loss": 9.6823, |
| "loss/crossentropy": 2.2719234466552733, |
| "loss/hidden": 4.294140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3143883816897869, |
| "step": 1130 |
| }, |
| { |
| "epoch": 0.114, |
| "grad_norm": 60.5, |
| "grad_norm_var": 45.925, |
| "learning_rate": 0.0001, |
| "loss": 9.7564, |
| "loss/crossentropy": 2.4254489660263063, |
| "loss/hidden": 4.261328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3154076419770718, |
| "step": 1140 |
| }, |
| { |
| "epoch": 0.115, |
| "grad_norm": 56.0, |
| "grad_norm_var": 71.74583333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.7001, |
| "loss/crossentropy": 2.28252642005682, |
| "loss/hidden": 4.323046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3203336976468563, |
| "step": 1150 |
| }, |
| { |
| "epoch": 0.116, |
| "grad_norm": 67.0, |
| "grad_norm_var": 46.040625, |
| "learning_rate": 0.0001, |
| "loss": 9.7436, |
| "loss/crossentropy": 2.391976150870323, |
| "loss/hidden": 4.225390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31455044373869895, |
| "step": 1160 |
| }, |
| { |
| "epoch": 0.117, |
| "grad_norm": 46.0, |
| "grad_norm_var": 47.06640625, |
| "learning_rate": 0.0001, |
| "loss": 9.5622, |
| "loss/crossentropy": 2.3361207604408265, |
| "loss/hidden": 4.19296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.30060703232884406, |
| "step": 1170 |
| }, |
| { |
| "epoch": 0.118, |
| "grad_norm": 56.25, |
| "grad_norm_var": 49.264322916666664, |
| "learning_rate": 0.0001, |
| "loss": 9.6834, |
| "loss/crossentropy": 2.297483670711517, |
| "loss/hidden": 4.2890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2907493541017175, |
| "step": 1180 |
| }, |
| { |
| "epoch": 0.119, |
| "grad_norm": 52.5, |
| "grad_norm_var": 12.27890625, |
| "learning_rate": 0.0001, |
| "loss": 9.6207, |
| "loss/crossentropy": 2.2364058643579483, |
| "loss/hidden": 4.277734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3097097765654325, |
| "step": 1190 |
| }, |
| { |
| "epoch": 0.12, |
| "grad_norm": 68.5, |
| "grad_norm_var": 35.055989583333336, |
| "learning_rate": 0.0001, |
| "loss": 9.6018, |
| "loss/crossentropy": 2.2412969201803206, |
| "loss/hidden": 4.287109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31851550191640854, |
| "step": 1200 |
| }, |
| { |
| "epoch": 0.121, |
| "grad_norm": 60.5, |
| "grad_norm_var": 25.774739583333332, |
| "learning_rate": 0.0001, |
| "loss": 9.6979, |
| "loss/crossentropy": 2.3062032952904703, |
| "loss/hidden": 4.258984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3124631106853485, |
| "step": 1210 |
| }, |
| { |
| "epoch": 0.122, |
| "grad_norm": 59.25, |
| "grad_norm_var": 20.026822916666667, |
| "learning_rate": 0.0001, |
| "loss": 9.7129, |
| "loss/crossentropy": 2.4036868065595627, |
| "loss/hidden": 4.20234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31101155243813994, |
| "step": 1220 |
| }, |
| { |
| "epoch": 0.123, |
| "grad_norm": 53.25, |
| "grad_norm_var": 75.30833333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.7047, |
| "loss/crossentropy": 2.3730016142129897, |
| "loss/hidden": 4.193359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3105484452098608, |
| "step": 1230 |
| }, |
| { |
| "epoch": 0.124, |
| "grad_norm": 62.25, |
| "grad_norm_var": 33.27682291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.6313, |
| "loss/crossentropy": 2.2872567594051363, |
| "loss/hidden": 4.319140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3244694545865059, |
| "step": 1240 |
| }, |
| { |
| "epoch": 0.125, |
| "grad_norm": 61.25, |
| "grad_norm_var": 25.673958333333335, |
| "learning_rate": 0.0001, |
| "loss": 9.6217, |
| "loss/crossentropy": 2.3013710603117943, |
| "loss/hidden": 4.29140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.32178852558135984, |
| "step": 1250 |
| }, |
| { |
| "epoch": 0.126, |
| "grad_norm": 49.5, |
| "grad_norm_var": 46.97265625, |
| "learning_rate": 0.0001, |
| "loss": 9.6151, |
| "loss/crossentropy": 2.2743802405893803, |
| "loss/hidden": 4.19921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3104738780297339, |
| "step": 1260 |
| }, |
| { |
| "epoch": 0.127, |
| "grad_norm": 52.25, |
| "grad_norm_var": 318.94557291666666, |
| "learning_rate": 0.0001, |
| "loss": 9.635, |
| "loss/crossentropy": 2.2751111879944803, |
| "loss/hidden": 4.17578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2947248375043273, |
| "step": 1270 |
| }, |
| { |
| "epoch": 0.128, |
| "grad_norm": 59.5, |
| "grad_norm_var": 201.26848958333332, |
| "learning_rate": 0.0001, |
| "loss": 9.605, |
| "loss/crossentropy": 2.3590754181146623, |
| "loss/hidden": 4.116015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29773430675268175, |
| "step": 1280 |
| }, |
| { |
| "epoch": 0.129, |
| "grad_norm": 50.0, |
| "grad_norm_var": 25.795833333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.4314, |
| "loss/crossentropy": 2.165515697002411, |
| "loss/hidden": 4.148046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2729496695101261, |
| "step": 1290 |
| }, |
| { |
| "epoch": 0.13, |
| "grad_norm": 51.5, |
| "grad_norm_var": 65.69557291666666, |
| "learning_rate": 0.0001, |
| "loss": 9.4579, |
| "loss/crossentropy": 2.425456903874874, |
| "loss/hidden": 4.1140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3002984166145325, |
| "step": 1300 |
| }, |
| { |
| "epoch": 0.131, |
| "grad_norm": 55.75, |
| "grad_norm_var": 74.63515625, |
| "learning_rate": 0.0001, |
| "loss": 9.562, |
| "loss/crossentropy": 2.3212677478790282, |
| "loss/hidden": 4.209765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28645528480410576, |
| "step": 1310 |
| }, |
| { |
| "epoch": 0.132, |
| "grad_norm": 44.75, |
| "grad_norm_var": 39.139322916666664, |
| "learning_rate": 0.0001, |
| "loss": 9.305, |
| "loss/crossentropy": 2.2911602184176445, |
| "loss/hidden": 4.133984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28404638059437276, |
| "step": 1320 |
| }, |
| { |
| "epoch": 0.133, |
| "grad_norm": 52.25, |
| "grad_norm_var": 76.19583333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.3122, |
| "loss/crossentropy": 2.3109163105487824, |
| "loss/hidden": 4.137109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2864396806806326, |
| "step": 1330 |
| }, |
| { |
| "epoch": 0.134, |
| "grad_norm": 47.0, |
| "grad_norm_var": 41.66015625, |
| "learning_rate": 0.0001, |
| "loss": 9.4629, |
| "loss/crossentropy": 2.353537403047085, |
| "loss/hidden": 4.08203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2971150416880846, |
| "step": 1340 |
| }, |
| { |
| "epoch": 0.135, |
| "grad_norm": 46.25, |
| "grad_norm_var": 45.31848958333333, |
| "learning_rate": 0.0001, |
| "loss": 9.365, |
| "loss/crossentropy": 2.3774181246757506, |
| "loss/hidden": 4.09296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2799839396029711, |
| "step": 1350 |
| }, |
| { |
| "epoch": 0.136, |
| "grad_norm": 51.0, |
| "grad_norm_var": 17.93515625, |
| "learning_rate": 0.0001, |
| "loss": 9.3498, |
| "loss/crossentropy": 2.246833881735802, |
| "loss/hidden": 4.18828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2903384942561388, |
| "step": 1360 |
| }, |
| { |
| "epoch": 0.137, |
| "grad_norm": 51.25, |
| "grad_norm_var": 12.420833333333333, |
| "learning_rate": 0.0001, |
| "loss": 9.4976, |
| "loss/crossentropy": 2.453240838646889, |
| "loss/hidden": 4.173828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3144164770841599, |
| "step": 1370 |
| }, |
| { |
| "epoch": 0.138, |
| "grad_norm": 70.0, |
| "grad_norm_var": 2011.0322916666667, |
| "learning_rate": 0.0001, |
| "loss": 9.5884, |
| "loss/crossentropy": 2.174116183817387, |
| "loss/hidden": 4.24921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2923248626291752, |
| "step": 1380 |
| }, |
| { |
| "epoch": 0.139, |
| "grad_norm": 53.75, |
| "grad_norm_var": 1988.8833333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.5249, |
| "loss/crossentropy": 2.3638354018330574, |
| "loss/hidden": 4.184765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3066251628100872, |
| "step": 1390 |
| }, |
| { |
| "epoch": 0.14, |
| "grad_norm": 55.5, |
| "grad_norm_var": 22.779166666666665, |
| "learning_rate": 0.0001, |
| "loss": 9.3528, |
| "loss/crossentropy": 2.4166768550872804, |
| "loss/hidden": 4.123828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29636494982987643, |
| "step": 1400 |
| }, |
| { |
| "epoch": 0.141, |
| "grad_norm": 60.5, |
| "grad_norm_var": 66.59348958333334, |
| "learning_rate": 0.0001, |
| "loss": 9.5339, |
| "loss/crossentropy": 2.3475931867957116, |
| "loss/hidden": 4.1953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.30608872696757317, |
| "step": 1410 |
| }, |
| { |
| "epoch": 0.142, |
| "grad_norm": 51.25, |
| "grad_norm_var": 62.49140625, |
| "learning_rate": 0.0001, |
| "loss": 9.3342, |
| "loss/crossentropy": 2.1785849004983904, |
| "loss/hidden": 4.161328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27641028352081776, |
| "step": 1420 |
| }, |
| { |
| "epoch": 0.143, |
| "grad_norm": 54.25, |
| "grad_norm_var": 30.154166666666665, |
| "learning_rate": 0.0001, |
| "loss": 9.3898, |
| "loss/crossentropy": 2.3990818440914152, |
| "loss/hidden": 4.18359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2944341886788607, |
| "step": 1430 |
| }, |
| { |
| "epoch": 0.144, |
| "grad_norm": 51.75, |
| "grad_norm_var": 38.93932291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.4628, |
| "loss/crossentropy": 2.4946817860007284, |
| "loss/hidden": 4.198828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31867978498339655, |
| "step": 1440 |
| }, |
| { |
| "epoch": 0.145, |
| "grad_norm": 53.75, |
| "grad_norm_var": 33.9, |
| "learning_rate": 0.0001, |
| "loss": 9.3416, |
| "loss/crossentropy": 2.2067521095275877, |
| "loss/hidden": 4.235546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2976540043950081, |
| "step": 1450 |
| }, |
| { |
| "epoch": 0.146, |
| "grad_norm": 60.75, |
| "grad_norm_var": 142.08229166666666, |
| "learning_rate": 0.0001, |
| "loss": 9.4716, |
| "loss/crossentropy": 2.4361192852258684, |
| "loss/hidden": 4.1140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2877715673297644, |
| "step": 1460 |
| }, |
| { |
| "epoch": 0.147, |
| "grad_norm": 58.75, |
| "grad_norm_var": 44.35, |
| "learning_rate": 0.0001, |
| "loss": 9.4006, |
| "loss/crossentropy": 2.239429622516036, |
| "loss/hidden": 4.026171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27844256814569235, |
| "step": 1470 |
| }, |
| { |
| "epoch": 0.148, |
| "grad_norm": 45.0, |
| "grad_norm_var": 33.95390625, |
| "learning_rate": 0.0001, |
| "loss": 9.3993, |
| "loss/crossentropy": 2.0759536787867545, |
| "loss/hidden": 4.068359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2688772227615118, |
| "step": 1480 |
| }, |
| { |
| "epoch": 0.149, |
| "grad_norm": 51.75, |
| "grad_norm_var": 25.795833333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.3786, |
| "loss/crossentropy": 2.286362998187542, |
| "loss/hidden": 4.15078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2942257083952427, |
| "step": 1490 |
| }, |
| { |
| "epoch": 0.15, |
| "grad_norm": 46.75, |
| "grad_norm_var": 20.520833333333332, |
| "learning_rate": 0.0001, |
| "loss": 9.2903, |
| "loss/crossentropy": 2.312733788788319, |
| "loss/hidden": 3.971484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2691910218447447, |
| "step": 1500 |
| }, |
| { |
| "epoch": 0.151, |
| "grad_norm": 50.25, |
| "grad_norm_var": 28.290625, |
| "learning_rate": 0.0001, |
| "loss": 9.3076, |
| "loss/crossentropy": 2.2467628076672552, |
| "loss/hidden": 4.105078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2887777745723724, |
| "step": 1510 |
| }, |
| { |
| "epoch": 0.152, |
| "grad_norm": 63.5, |
| "grad_norm_var": 33.73098958333333, |
| "learning_rate": 0.0001, |
| "loss": 9.4203, |
| "loss/crossentropy": 2.372379180788994, |
| "loss/hidden": 4.07578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3087839350104332, |
| "step": 1520 |
| }, |
| { |
| "epoch": 0.153, |
| "grad_norm": 45.5, |
| "grad_norm_var": 40.108333333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.3215, |
| "loss/crossentropy": 2.3452367037534714, |
| "loss/hidden": 4.210546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3159611392766237, |
| "step": 1530 |
| }, |
| { |
| "epoch": 0.154, |
| "grad_norm": 58.25, |
| "grad_norm_var": 27.539322916666666, |
| "learning_rate": 0.0001, |
| "loss": 9.3755, |
| "loss/crossentropy": 2.3029753446578978, |
| "loss/hidden": 3.999609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2623455457389355, |
| "step": 1540 |
| }, |
| { |
| "epoch": 0.155, |
| "grad_norm": 51.75, |
| "grad_norm_var": 26.9, |
| "learning_rate": 0.0001, |
| "loss": 9.3578, |
| "loss/crossentropy": 2.3988554388284684, |
| "loss/hidden": 4.08828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2846154376864433, |
| "step": 1550 |
| }, |
| { |
| "epoch": 0.156, |
| "grad_norm": 91.0, |
| "grad_norm_var": 1307.8372395833333, |
| "learning_rate": 0.0001, |
| "loss": 9.432, |
| "loss/crossentropy": 2.343544365465641, |
| "loss/hidden": 4.021875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2907770898193121, |
| "step": 1560 |
| }, |
| { |
| "epoch": 0.157, |
| "grad_norm": 52.0, |
| "grad_norm_var": 170.62890625, |
| "learning_rate": 0.0001, |
| "loss": 9.3432, |
| "loss/crossentropy": 2.173108433187008, |
| "loss/hidden": 4.10859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28518917988985776, |
| "step": 1570 |
| }, |
| { |
| "epoch": 0.158, |
| "grad_norm": 42.0, |
| "grad_norm_var": 47.56015625, |
| "learning_rate": 0.0001, |
| "loss": 9.367, |
| "loss/crossentropy": 2.2230691239237785, |
| "loss/hidden": 4.23515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.30039387457072736, |
| "step": 1580 |
| }, |
| { |
| "epoch": 0.159, |
| "grad_norm": 72.0, |
| "grad_norm_var": 1.226104970407838e+18, |
| "learning_rate": 0.0001, |
| "loss": 9.3564, |
| "loss/crossentropy": 2.263391149044037, |
| "loss/hidden": 4.10625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2923804897814989, |
| "step": 1590 |
| }, |
| { |
| "epoch": 0.16, |
| "grad_norm": 52.5, |
| "grad_norm_var": 1.2261049681378806e+18, |
| "learning_rate": 0.0001, |
| "loss": 9.4959, |
| "loss/crossentropy": 2.113241518288851, |
| "loss/hidden": 4.087109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2759646028280258, |
| "step": 1600 |
| }, |
| { |
| "epoch": 0.161, |
| "grad_norm": 66.0, |
| "grad_norm_var": 734.1489583333333, |
| "learning_rate": 0.0001, |
| "loss": 9.4743, |
| "loss/crossentropy": 2.3895165085792542, |
| "loss/hidden": 4.059765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2987998936325312, |
| "step": 1610 |
| }, |
| { |
| "epoch": 0.162, |
| "grad_norm": 44.75, |
| "grad_norm_var": 50.00182291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.1919, |
| "loss/crossentropy": 2.251766300201416, |
| "loss/hidden": 4.04375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2781111396849155, |
| "step": 1620 |
| }, |
| { |
| "epoch": 0.163, |
| "grad_norm": 52.75, |
| "grad_norm_var": 437.49583333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.4572, |
| "loss/crossentropy": 2.382322034239769, |
| "loss/hidden": 4.07734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31318275928497313, |
| "step": 1630 |
| }, |
| { |
| "epoch": 0.164, |
| "grad_norm": 61.0, |
| "grad_norm_var": 40.301822916666666, |
| "learning_rate": 0.0001, |
| "loss": 9.2668, |
| "loss/crossentropy": 2.1683703124523164, |
| "loss/hidden": 4.07578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.283413190767169, |
| "step": 1640 |
| }, |
| { |
| "epoch": 0.165, |
| "grad_norm": 42.75, |
| "grad_norm_var": 57.307291666666664, |
| "learning_rate": 0.0001, |
| "loss": 9.339, |
| "loss/crossentropy": 2.3430400043725967, |
| "loss/hidden": 4.036328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.287694800645113, |
| "step": 1650 |
| }, |
| { |
| "epoch": 0.166, |
| "grad_norm": 46.75, |
| "grad_norm_var": 65.52395833333334, |
| "learning_rate": 0.0001, |
| "loss": 9.3768, |
| "loss/crossentropy": 2.2867416352033616, |
| "loss/hidden": 4.017578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29683431759476664, |
| "step": 1660 |
| }, |
| { |
| "epoch": 0.167, |
| "grad_norm": 52.5, |
| "grad_norm_var": 61.18932291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.2451, |
| "loss/crossentropy": 2.3707614041864873, |
| "loss/hidden": 4.06015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29184688804671166, |
| "step": 1670 |
| }, |
| { |
| "epoch": 0.168, |
| "grad_norm": 51.75, |
| "grad_norm_var": 20.895572916666666, |
| "learning_rate": 0.0001, |
| "loss": 9.3601, |
| "loss/crossentropy": 2.3268392831087112, |
| "loss/hidden": 4.12734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29570323824882505, |
| "step": 1680 |
| }, |
| { |
| "epoch": 0.169, |
| "grad_norm": 44.0, |
| "grad_norm_var": 10.290625, |
| "learning_rate": 0.0001, |
| "loss": 9.4214, |
| "loss/crossentropy": 2.324131193757057, |
| "loss/hidden": 4.1984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3133995305746794, |
| "step": 1690 |
| }, |
| { |
| "epoch": 0.17, |
| "grad_norm": 58.25, |
| "grad_norm_var": 19.124739583333334, |
| "learning_rate": 0.0001, |
| "loss": 9.2465, |
| "loss/crossentropy": 2.35849623978138, |
| "loss/hidden": 4.00703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2762619823217392, |
| "step": 1700 |
| }, |
| { |
| "epoch": 0.171, |
| "grad_norm": 45.75, |
| "grad_norm_var": 53.89895833333333, |
| "learning_rate": 0.0001, |
| "loss": 9.1951, |
| "loss/crossentropy": 2.3914038598537446, |
| "loss/hidden": 3.9984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2871177852153778, |
| "step": 1710 |
| }, |
| { |
| "epoch": 0.172, |
| "grad_norm": 43.25, |
| "grad_norm_var": 16.479166666666668, |
| "learning_rate": 0.0001, |
| "loss": 9.1669, |
| "loss/crossentropy": 2.152750685811043, |
| "loss/hidden": 4.100390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28708020225167274, |
| "step": 1720 |
| }, |
| { |
| "epoch": 0.173, |
| "grad_norm": 49.25, |
| "grad_norm_var": 13.45390625, |
| "learning_rate": 0.0001, |
| "loss": 9.1015, |
| "loss/crossentropy": 2.2946193665266037, |
| "loss/hidden": 4.085546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3062314610928297, |
| "step": 1730 |
| }, |
| { |
| "epoch": 0.174, |
| "grad_norm": 46.5, |
| "grad_norm_var": 22.473958333333332, |
| "learning_rate": 0.0001, |
| "loss": 9.1287, |
| "loss/crossentropy": 2.1538643553853034, |
| "loss/hidden": 3.9421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2666194221004844, |
| "step": 1740 |
| }, |
| { |
| "epoch": 0.175, |
| "grad_norm": 47.0, |
| "grad_norm_var": 32.62057291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.411, |
| "loss/crossentropy": 2.387891933321953, |
| "loss/hidden": 4.14921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29542505368590355, |
| "step": 1750 |
| }, |
| { |
| "epoch": 0.176, |
| "grad_norm": 45.25, |
| "grad_norm_var": 26.92265625, |
| "learning_rate": 0.0001, |
| "loss": 9.2833, |
| "loss/crossentropy": 2.3024097591638566, |
| "loss/hidden": 4.03984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29066667445003985, |
| "step": 1760 |
| }, |
| { |
| "epoch": 0.177, |
| "grad_norm": 53.5, |
| "grad_norm_var": 17.832291666666666, |
| "learning_rate": 0.0001, |
| "loss": 9.2665, |
| "loss/crossentropy": 2.4454205125570296, |
| "loss/hidden": 3.955859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2910691563040018, |
| "step": 1770 |
| }, |
| { |
| "epoch": 0.178, |
| "grad_norm": 42.25, |
| "grad_norm_var": 29.865625, |
| "learning_rate": 0.0001, |
| "loss": 9.1701, |
| "loss/crossentropy": 2.2966391056776048, |
| "loss/hidden": 4.027734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2789210833609104, |
| "step": 1780 |
| }, |
| { |
| "epoch": 0.179, |
| "grad_norm": 48.25, |
| "grad_norm_var": 17.548958333333335, |
| "learning_rate": 0.0001, |
| "loss": 9.1992, |
| "loss/crossentropy": 2.395502945780754, |
| "loss/hidden": 3.934765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2776679117232561, |
| "step": 1790 |
| }, |
| { |
| "epoch": 0.18, |
| "grad_norm": 40.75, |
| "grad_norm_var": 13.282291666666667, |
| "learning_rate": 0.0001, |
| "loss": 9.1046, |
| "loss/crossentropy": 2.22285817861557, |
| "loss/hidden": 3.9046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26667180880904195, |
| "step": 1800 |
| }, |
| { |
| "epoch": 0.181, |
| "grad_norm": 36.25, |
| "grad_norm_var": 34.90807291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.3204, |
| "loss/crossentropy": 2.3842350512743, |
| "loss/hidden": 4.009375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.300260554254055, |
| "step": 1810 |
| }, |
| { |
| "epoch": 0.182, |
| "grad_norm": 46.75, |
| "grad_norm_var": 27.77890625, |
| "learning_rate": 0.0001, |
| "loss": 9.0943, |
| "loss/crossentropy": 2.274762773513794, |
| "loss/hidden": 4.01328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28360783979296683, |
| "step": 1820 |
| }, |
| { |
| "epoch": 0.183, |
| "grad_norm": 55.5, |
| "grad_norm_var": 27.298958333333335, |
| "learning_rate": 0.0001, |
| "loss": 9.1699, |
| "loss/crossentropy": 2.1643219627439976, |
| "loss/hidden": 3.9921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.267458438500762, |
| "step": 1830 |
| }, |
| { |
| "epoch": 0.184, |
| "grad_norm": 49.75, |
| "grad_norm_var": 43.94583333333333, |
| "learning_rate": 0.0001, |
| "loss": 9.3022, |
| "loss/crossentropy": 2.464679929614067, |
| "loss/hidden": 3.9546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29758369028568266, |
| "step": 1840 |
| }, |
| { |
| "epoch": 0.185, |
| "grad_norm": 51.0, |
| "grad_norm_var": 37.90807291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.1863, |
| "loss/crossentropy": 2.3199010998010636, |
| "loss/hidden": 3.99453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27702242247760295, |
| "step": 1850 |
| }, |
| { |
| "epoch": 0.186, |
| "grad_norm": 46.5, |
| "grad_norm_var": 40.920833333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.2872, |
| "loss/crossentropy": 2.4041683062911035, |
| "loss/hidden": 4.09140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.30005627647042277, |
| "step": 1860 |
| }, |
| { |
| "epoch": 0.187, |
| "grad_norm": 39.75, |
| "grad_norm_var": 40.723958333333336, |
| "learning_rate": 0.0001, |
| "loss": 9.1081, |
| "loss/crossentropy": 2.273802790045738, |
| "loss/hidden": 4.175, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3045934235677123, |
| "step": 1870 |
| }, |
| { |
| "epoch": 0.188, |
| "grad_norm": 43.25, |
| "grad_norm_var": 33.35729166666667, |
| "learning_rate": 0.0001, |
| "loss": 9.106, |
| "loss/crossentropy": 2.3607766672968866, |
| "loss/hidden": 3.9765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28193066976964476, |
| "step": 1880 |
| }, |
| { |
| "epoch": 0.189, |
| "grad_norm": 48.25, |
| "grad_norm_var": 14.915625, |
| "learning_rate": 0.0001, |
| "loss": 9.1437, |
| "loss/crossentropy": 2.2798361241817475, |
| "loss/hidden": 3.9890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2721746701747179, |
| "step": 1890 |
| }, |
| { |
| "epoch": 0.19, |
| "grad_norm": 44.0, |
| "grad_norm_var": 21.223958333333332, |
| "learning_rate": 0.0001, |
| "loss": 9.0972, |
| "loss/crossentropy": 2.21695294380188, |
| "loss/hidden": 4.0015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2832322970032692, |
| "step": 1900 |
| }, |
| { |
| "epoch": 0.191, |
| "grad_norm": 39.5, |
| "grad_norm_var": 27.808333333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.1587, |
| "loss/crossentropy": 2.1728454776108266, |
| "loss/hidden": 3.98828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27170457877218723, |
| "step": 1910 |
| }, |
| { |
| "epoch": 0.192, |
| "grad_norm": 41.75, |
| "grad_norm_var": 13.315625, |
| "learning_rate": 0.0001, |
| "loss": 9.1326, |
| "loss/crossentropy": 2.154237084835768, |
| "loss/hidden": 4.063671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27950075305998323, |
| "step": 1920 |
| }, |
| { |
| "epoch": 0.193, |
| "grad_norm": 43.0, |
| "grad_norm_var": 25.240625, |
| "learning_rate": 0.0001, |
| "loss": 9.1013, |
| "loss/crossentropy": 2.2507698431611063, |
| "loss/hidden": 4.01171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2808088269084692, |
| "step": 1930 |
| }, |
| { |
| "epoch": 0.194, |
| "grad_norm": 49.25, |
| "grad_norm_var": 22.832291666666666, |
| "learning_rate": 0.0001, |
| "loss": 9.2429, |
| "loss/crossentropy": 2.288056728243828, |
| "loss/hidden": 4.1546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.31668607220053674, |
| "step": 1940 |
| }, |
| { |
| "epoch": 0.195, |
| "grad_norm": 48.5, |
| "grad_norm_var": 58.09557291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.1742, |
| "loss/crossentropy": 2.2107961744070055, |
| "loss/hidden": 4.05078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2858551822602749, |
| "step": 1950 |
| }, |
| { |
| "epoch": 0.196, |
| "grad_norm": 39.25, |
| "grad_norm_var": 49.50390625, |
| "learning_rate": 0.0001, |
| "loss": 9.1293, |
| "loss/crossentropy": 2.224529256671667, |
| "loss/hidden": 3.9703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27973891496658326, |
| "step": 1960 |
| }, |
| { |
| "epoch": 0.197, |
| "grad_norm": 39.25, |
| "grad_norm_var": 13.890625, |
| "learning_rate": 0.0001, |
| "loss": 9.0689, |
| "loss/crossentropy": 2.363737019896507, |
| "loss/hidden": 4.027734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2919711694121361, |
| "step": 1970 |
| }, |
| { |
| "epoch": 0.198, |
| "grad_norm": 55.75, |
| "grad_norm_var": 26.655989583333334, |
| "learning_rate": 0.0001, |
| "loss": 9.2228, |
| "loss/crossentropy": 2.3380469545722007, |
| "loss/hidden": 4.0171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28581551983952524, |
| "step": 1980 |
| }, |
| { |
| "epoch": 0.199, |
| "grad_norm": 45.0, |
| "grad_norm_var": 27.357291666666665, |
| "learning_rate": 0.0001, |
| "loss": 9.173, |
| "loss/crossentropy": 2.43135461807251, |
| "loss/hidden": 3.970703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28222124874591825, |
| "step": 1990 |
| }, |
| { |
| "epoch": 0.2, |
| "grad_norm": 44.5, |
| "grad_norm_var": 16.04140625, |
| "learning_rate": 0.0001, |
| "loss": 9.1554, |
| "loss/crossentropy": 2.4415812104940415, |
| "loss/hidden": 4.064453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2984179027378559, |
| "step": 2000 |
| }, |
| { |
| "epoch": 0.201, |
| "grad_norm": 51.75, |
| "grad_norm_var": 18.032291666666666, |
| "learning_rate": 0.0001, |
| "loss": 9.204, |
| "loss/crossentropy": 2.571503698825836, |
| "loss/hidden": 4.020703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3065837759524584, |
| "step": 2010 |
| }, |
| { |
| "epoch": 0.202, |
| "grad_norm": 42.25, |
| "grad_norm_var": 16.573958333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.1575, |
| "loss/crossentropy": 2.2947281152009964, |
| "loss/hidden": 3.949609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28560531958937646, |
| "step": 2020 |
| }, |
| { |
| "epoch": 0.203, |
| "grad_norm": 43.0, |
| "grad_norm_var": 10.068489583333333, |
| "learning_rate": 0.0001, |
| "loss": 9.1347, |
| "loss/crossentropy": 2.432750529050827, |
| "loss/hidden": 3.9953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28870879150927065, |
| "step": 2030 |
| }, |
| { |
| "epoch": 0.204, |
| "grad_norm": 45.0, |
| "grad_norm_var": 14.683333333333334, |
| "learning_rate": 0.0001, |
| "loss": 9.0302, |
| "loss/crossentropy": 2.291880601644516, |
| "loss/hidden": 3.852734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27138952538371086, |
| "step": 2040 |
| }, |
| { |
| "epoch": 0.205, |
| "grad_norm": 54.5, |
| "grad_norm_var": 24.0625, |
| "learning_rate": 0.0001, |
| "loss": 9.1623, |
| "loss/crossentropy": 2.1033009082078933, |
| "loss/hidden": 4.074609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29145455472171305, |
| "step": 2050 |
| }, |
| { |
| "epoch": 0.206, |
| "grad_norm": 42.25, |
| "grad_norm_var": 29.780989583333334, |
| "learning_rate": 0.0001, |
| "loss": 9.1409, |
| "loss/crossentropy": 2.3487906470894813, |
| "loss/hidden": 3.9140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2789519714191556, |
| "step": 2060 |
| }, |
| { |
| "epoch": 0.207, |
| "grad_norm": 42.0, |
| "grad_norm_var": 15.395572916666667, |
| "learning_rate": 0.0001, |
| "loss": 9.0027, |
| "loss/crossentropy": 2.210711918771267, |
| "loss/hidden": 3.975, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2679262701421976, |
| "step": 2070 |
| }, |
| { |
| "epoch": 0.208, |
| "grad_norm": 43.5, |
| "grad_norm_var": 12.7125, |
| "learning_rate": 0.0001, |
| "loss": 9.0306, |
| "loss/crossentropy": 2.1945893600583077, |
| "loss/hidden": 3.948046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28115708455443383, |
| "step": 2080 |
| }, |
| { |
| "epoch": 0.209, |
| "grad_norm": 47.0, |
| "grad_norm_var": 28.683072916666667, |
| "learning_rate": 0.0001, |
| "loss": 9.1344, |
| "loss/crossentropy": 2.237662248313427, |
| "loss/hidden": 4.035546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2781697390601039, |
| "step": 2090 |
| }, |
| { |
| "epoch": 0.21, |
| "grad_norm": 42.0, |
| "grad_norm_var": 15.390625, |
| "learning_rate": 0.0001, |
| "loss": 9.0004, |
| "loss/crossentropy": 2.269840542972088, |
| "loss/hidden": 3.9765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29009242728352547, |
| "step": 2100 |
| }, |
| { |
| "epoch": 0.211, |
| "grad_norm": 39.75, |
| "grad_norm_var": 8.10390625, |
| "learning_rate": 0.0001, |
| "loss": 9.0135, |
| "loss/crossentropy": 2.3315619856119154, |
| "loss/hidden": 4.028515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27684418186545373, |
| "step": 2110 |
| }, |
| { |
| "epoch": 0.212, |
| "grad_norm": 45.75, |
| "grad_norm_var": 115.615625, |
| "learning_rate": 0.0001, |
| "loss": 9.2092, |
| "loss/crossentropy": 2.4081921339035035, |
| "loss/hidden": 4.01171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29490497298538687, |
| "step": 2120 |
| }, |
| { |
| "epoch": 0.213, |
| "grad_norm": 43.0, |
| "grad_norm_var": 95.76432291666667, |
| "learning_rate": 0.0001, |
| "loss": 9.1058, |
| "loss/crossentropy": 2.393023744225502, |
| "loss/hidden": 3.94921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2818208742886782, |
| "step": 2130 |
| }, |
| { |
| "epoch": 0.214, |
| "grad_norm": 39.5, |
| "grad_norm_var": 13.873958333333333, |
| "learning_rate": 0.0001, |
| "loss": 9.0997, |
| "loss/crossentropy": 2.1606352396309374, |
| "loss/hidden": 3.843359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25883881878107784, |
| "step": 2140 |
| }, |
| { |
| "epoch": 0.215, |
| "grad_norm": 44.0, |
| "grad_norm_var": 22.845572916666665, |
| "learning_rate": 0.0001, |
| "loss": 9.1464, |
| "loss/crossentropy": 2.179043120145798, |
| "loss/hidden": 4.056640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2761132620275021, |
| "step": 2150 |
| }, |
| { |
| "epoch": 0.216, |
| "grad_norm": 46.5, |
| "grad_norm_var": 28.957291666666666, |
| "learning_rate": 0.0001, |
| "loss": 9.1057, |
| "loss/crossentropy": 2.301611530780792, |
| "loss/hidden": 3.959375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29443784058094025, |
| "step": 2160 |
| }, |
| { |
| "epoch": 0.217, |
| "grad_norm": 46.25, |
| "grad_norm_var": 36.708333333333336, |
| "learning_rate": 0.0001, |
| "loss": 9.0952, |
| "loss/crossentropy": 2.3569841012358665, |
| "loss/hidden": 3.994921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28191804718226193, |
| "step": 2170 |
| }, |
| { |
| "epoch": 0.218, |
| "grad_norm": 81.5, |
| "grad_norm_var": 117.24895833333333, |
| "learning_rate": 0.0001, |
| "loss": 9.0595, |
| "loss/crossentropy": 2.4225870154798033, |
| "loss/hidden": 3.953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2870227605104446, |
| "step": 2180 |
| }, |
| { |
| "epoch": 0.219, |
| "grad_norm": 36.75, |
| "grad_norm_var": 173.97265625, |
| "learning_rate": 0.0001, |
| "loss": 9.1612, |
| "loss/crossentropy": 2.34947164952755, |
| "loss/hidden": 4.0546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2929512483999133, |
| "step": 2190 |
| }, |
| { |
| "epoch": 0.22, |
| "grad_norm": 41.75, |
| "grad_norm_var": 31.648958333333333, |
| "learning_rate": 0.0001, |
| "loss": 9.101, |
| "loss/crossentropy": 2.218617644906044, |
| "loss/hidden": 4.029296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2752823047339916, |
| "step": 2200 |
| }, |
| { |
| "epoch": 0.221, |
| "grad_norm": 47.0, |
| "grad_norm_var": 10.50390625, |
| "learning_rate": 0.0001, |
| "loss": 9.0496, |
| "loss/crossentropy": 2.4772594451904295, |
| "loss/hidden": 3.96953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28055914528667925, |
| "step": 2210 |
| }, |
| { |
| "epoch": 0.222, |
| "grad_norm": 49.5, |
| "grad_norm_var": 442.3, |
| "learning_rate": 0.0001, |
| "loss": 8.9647, |
| "loss/crossentropy": 2.44835202395916, |
| "loss/hidden": 3.905859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27223448157310487, |
| "step": 2220 |
| }, |
| { |
| "epoch": 0.223, |
| "grad_norm": 35.25, |
| "grad_norm_var": 577.0458333333333, |
| "learning_rate": 0.0001, |
| "loss": 9.1287, |
| "loss/crossentropy": 2.2784701570868493, |
| "loss/hidden": 3.98671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28111674822866917, |
| "step": 2230 |
| }, |
| { |
| "epoch": 0.224, |
| "grad_norm": 43.75, |
| "grad_norm_var": 212.74348958333334, |
| "learning_rate": 0.0001, |
| "loss": 9.1339, |
| "loss/crossentropy": 2.2536803498864173, |
| "loss/hidden": 3.934375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27366876490414144, |
| "step": 2240 |
| }, |
| { |
| "epoch": 0.225, |
| "grad_norm": 42.5, |
| "grad_norm_var": 30.143489583333334, |
| "learning_rate": 0.0001, |
| "loss": 9.0536, |
| "loss/crossentropy": 2.300386372208595, |
| "loss/hidden": 3.937890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2872367199510336, |
| "step": 2250 |
| }, |
| { |
| "epoch": 0.226, |
| "grad_norm": 40.75, |
| "grad_norm_var": 19.65390625, |
| "learning_rate": 0.0001, |
| "loss": 9.0113, |
| "loss/crossentropy": 2.318666061758995, |
| "loss/hidden": 3.8796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.259370943903923, |
| "step": 2260 |
| }, |
| { |
| "epoch": 0.227, |
| "grad_norm": 43.75, |
| "grad_norm_var": 12.46015625, |
| "learning_rate": 0.0001, |
| "loss": 8.9343, |
| "loss/crossentropy": 2.169833867251873, |
| "loss/hidden": 3.8984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26568643413484094, |
| "step": 2270 |
| }, |
| { |
| "epoch": 0.228, |
| "grad_norm": 38.75, |
| "grad_norm_var": 11.832291666666666, |
| "learning_rate": 0.0001, |
| "loss": 9.0042, |
| "loss/crossentropy": 2.1532950207591055, |
| "loss/hidden": 3.987890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2669289981946349, |
| "step": 2280 |
| }, |
| { |
| "epoch": 0.229, |
| "grad_norm": 46.5, |
| "grad_norm_var": 13.432291666666666, |
| "learning_rate": 0.0001, |
| "loss": 9.1282, |
| "loss/crossentropy": 2.0724870592355726, |
| "loss/hidden": 4.09765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27939337231218814, |
| "step": 2290 |
| }, |
| { |
| "epoch": 0.23, |
| "grad_norm": 35.75, |
| "grad_norm_var": 223.3875, |
| "learning_rate": 0.0001, |
| "loss": 9.1656, |
| "loss/crossentropy": 2.4021882474422456, |
| "loss/hidden": 3.941796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28748833425343034, |
| "step": 2300 |
| }, |
| { |
| "epoch": 0.231, |
| "grad_norm": 40.5, |
| "grad_norm_var": 264.82395833333334, |
| "learning_rate": 0.0001, |
| "loss": 8.9421, |
| "loss/crossentropy": 2.201041653752327, |
| "loss/hidden": 4.012109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29431225806474687, |
| "step": 2310 |
| }, |
| { |
| "epoch": 0.232, |
| "grad_norm": 37.25, |
| "grad_norm_var": 14.939322916666667, |
| "learning_rate": 0.0001, |
| "loss": 9.0761, |
| "loss/crossentropy": 2.156717260926962, |
| "loss/hidden": 4.16484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27824588380753995, |
| "step": 2320 |
| }, |
| { |
| "epoch": 0.233, |
| "grad_norm": 44.0, |
| "grad_norm_var": 66.07890625, |
| "learning_rate": 0.0001, |
| "loss": 9.1021, |
| "loss/crossentropy": 2.3962995454669, |
| "loss/hidden": 3.849609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27611064203083513, |
| "step": 2330 |
| }, |
| { |
| "epoch": 0.234, |
| "grad_norm": 37.5, |
| "grad_norm_var": 2.5671221192004644e+18, |
| "learning_rate": 0.0001, |
| "loss": 9.1022, |
| "loss/crossentropy": 2.302082321047783, |
| "loss/hidden": 3.980859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2941384054720402, |
| "step": 2340 |
| }, |
| { |
| "epoch": 0.235, |
| "grad_norm": 42.75, |
| "grad_norm_var": 49.62473958333333, |
| "learning_rate": 0.0001, |
| "loss": 8.7886, |
| "loss/crossentropy": 2.239869697391987, |
| "loss/hidden": 3.977734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25684802830219267, |
| "step": 2350 |
| }, |
| { |
| "epoch": 0.236, |
| "grad_norm": 39.5, |
| "grad_norm_var": 10.540625, |
| "learning_rate": 0.0001, |
| "loss": 8.8748, |
| "loss/crossentropy": 2.2847611531615257, |
| "loss/hidden": 4.076171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2846489936113358, |
| "step": 2360 |
| }, |
| { |
| "epoch": 0.237, |
| "grad_norm": 44.25, |
| "grad_norm_var": 33.15416666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.8811, |
| "loss/crossentropy": 2.209371344745159, |
| "loss/hidden": 3.907421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2737090703099966, |
| "step": 2370 |
| }, |
| { |
| "epoch": 0.238, |
| "grad_norm": 42.0, |
| "grad_norm_var": 58.614322916666666, |
| "learning_rate": 0.0001, |
| "loss": 9.0391, |
| "loss/crossentropy": 2.381209687888622, |
| "loss/hidden": 3.9296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2849856551736593, |
| "step": 2380 |
| }, |
| { |
| "epoch": 0.239, |
| "grad_norm": 39.75, |
| "grad_norm_var": 17.541666666666668, |
| "learning_rate": 0.0001, |
| "loss": 9.1216, |
| "loss/crossentropy": 2.2550544410943987, |
| "loss/hidden": 4.000390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2924022350460291, |
| "step": 2390 |
| }, |
| { |
| "epoch": 0.24, |
| "grad_norm": 46.25, |
| "grad_norm_var": 18.491666666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.9833, |
| "loss/crossentropy": 2.2908297032117844, |
| "loss/hidden": 3.901953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26052255779504774, |
| "step": 2400 |
| }, |
| { |
| "epoch": 0.241, |
| "grad_norm": 38.25, |
| "grad_norm_var": 23.640625, |
| "learning_rate": 0.0001, |
| "loss": 9.1123, |
| "loss/crossentropy": 2.3713207334280013, |
| "loss/hidden": 3.988671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2721697688102722, |
| "step": 2410 |
| }, |
| { |
| "epoch": 0.242, |
| "grad_norm": 42.5, |
| "grad_norm_var": 69.77473958333333, |
| "learning_rate": 0.0001, |
| "loss": 8.9108, |
| "loss/crossentropy": 2.0408870808780195, |
| "loss/hidden": 3.873046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24748602956533433, |
| "step": 2420 |
| }, |
| { |
| "epoch": 0.243, |
| "grad_norm": 37.25, |
| "grad_norm_var": 72.11015625, |
| "learning_rate": 0.0001, |
| "loss": 9.0111, |
| "loss/crossentropy": 2.3027436569333077, |
| "loss/hidden": 3.908984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27137077748775484, |
| "step": 2430 |
| }, |
| { |
| "epoch": 0.244, |
| "grad_norm": 54.75, |
| "grad_norm_var": 55.057291666666664, |
| "learning_rate": 0.0001, |
| "loss": 9.0763, |
| "loss/crossentropy": 2.2573105663061144, |
| "loss/hidden": 4.1703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3013453852385283, |
| "step": 2440 |
| }, |
| { |
| "epoch": 0.245, |
| "grad_norm": 39.25, |
| "grad_norm_var": 50.00416666666667, |
| "learning_rate": 0.0001, |
| "loss": 9.0658, |
| "loss/crossentropy": 2.4534697026014327, |
| "loss/hidden": 4.172265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2898527968674898, |
| "step": 2450 |
| }, |
| { |
| "epoch": 0.246, |
| "grad_norm": 40.75, |
| "grad_norm_var": 18.71640625, |
| "learning_rate": 0.0001, |
| "loss": 8.9953, |
| "loss/crossentropy": 2.357613870501518, |
| "loss/hidden": 3.9125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27832051999866964, |
| "step": 2460 |
| }, |
| { |
| "epoch": 0.247, |
| "grad_norm": 34.0, |
| "grad_norm_var": 14.832291666666666, |
| "learning_rate": 0.0001, |
| "loss": 8.9576, |
| "loss/crossentropy": 2.295011055469513, |
| "loss/hidden": 4.029296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29190085306763647, |
| "step": 2470 |
| }, |
| { |
| "epoch": 0.248, |
| "grad_norm": 50.75, |
| "grad_norm_var": 24.075, |
| "learning_rate": 0.0001, |
| "loss": 8.7866, |
| "loss/crossentropy": 2.2598410531878472, |
| "loss/hidden": 3.84921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2570509884506464, |
| "step": 2480 |
| }, |
| { |
| "epoch": 0.249, |
| "grad_norm": 38.0, |
| "grad_norm_var": 97.60416666666667, |
| "learning_rate": 0.0001, |
| "loss": 9.0289, |
| "loss/crossentropy": 2.365708181262016, |
| "loss/hidden": 3.835546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2712419513612986, |
| "step": 2490 |
| }, |
| { |
| "epoch": 0.25, |
| "grad_norm": 44.5, |
| "grad_norm_var": 104.10729166666667, |
| "learning_rate": 0.0001, |
| "loss": 8.8744, |
| "loss/crossentropy": 2.063434064388275, |
| "loss/hidden": 3.873046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2460779383778572, |
| "step": 2500 |
| }, |
| { |
| "epoch": 0.251, |
| "grad_norm": 41.75, |
| "grad_norm_var": 28.974739583333335, |
| "learning_rate": 0.0001, |
| "loss": 8.9272, |
| "loss/crossentropy": 2.2377428650856017, |
| "loss/hidden": 3.968359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26241020299494267, |
| "step": 2510 |
| }, |
| { |
| "epoch": 0.252, |
| "grad_norm": 43.5, |
| "grad_norm_var": 20.84140625, |
| "learning_rate": 0.0001, |
| "loss": 8.9773, |
| "loss/crossentropy": 2.2245729833841326, |
| "loss/hidden": 3.854296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2788569286465645, |
| "step": 2520 |
| }, |
| { |
| "epoch": 0.253, |
| "grad_norm": 37.25, |
| "grad_norm_var": 10.36640625, |
| "learning_rate": 0.0001, |
| "loss": 8.8657, |
| "loss/crossentropy": 2.19287933409214, |
| "loss/hidden": 3.873046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2571034274995327, |
| "step": 2530 |
| }, |
| { |
| "epoch": 0.254, |
| "grad_norm": 42.75, |
| "grad_norm_var": 25.641666666666666, |
| "learning_rate": 0.0001, |
| "loss": 9.0621, |
| "loss/crossentropy": 2.344786374270916, |
| "loss/hidden": 4.005078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29999860040843485, |
| "step": 2540 |
| }, |
| { |
| "epoch": 0.255, |
| "grad_norm": 45.25, |
| "grad_norm_var": 28.983333333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.8825, |
| "loss/crossentropy": 2.165843137353659, |
| "loss/hidden": 3.794921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24595264531672, |
| "step": 2550 |
| }, |
| { |
| "epoch": 0.256, |
| "grad_norm": 44.0, |
| "grad_norm_var": 13.407291666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.9041, |
| "loss/crossentropy": 2.3431010633707046, |
| "loss/hidden": 3.880859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27991249822080133, |
| "step": 2560 |
| }, |
| { |
| "epoch": 0.257, |
| "grad_norm": 49.25, |
| "grad_norm_var": 19.137239583333333, |
| "learning_rate": 0.0001, |
| "loss": 8.7585, |
| "loss/crossentropy": 2.2741693764925004, |
| "loss/hidden": 3.848046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2573831077665091, |
| "step": 2570 |
| }, |
| { |
| "epoch": 0.258, |
| "grad_norm": 40.75, |
| "grad_norm_var": 23.523958333333333, |
| "learning_rate": 0.0001, |
| "loss": 9.0385, |
| "loss/crossentropy": 2.3741003662347793, |
| "loss/hidden": 3.9703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3021115079522133, |
| "step": 2580 |
| }, |
| { |
| "epoch": 0.259, |
| "grad_norm": 38.5, |
| "grad_norm_var": 20.873958333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.8554, |
| "loss/crossentropy": 2.259954023361206, |
| "loss/hidden": 3.92734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2688195243477821, |
| "step": 2590 |
| }, |
| { |
| "epoch": 0.26, |
| "grad_norm": 33.25, |
| "grad_norm_var": 16.76015625, |
| "learning_rate": 0.0001, |
| "loss": 8.8802, |
| "loss/crossentropy": 2.155090569704771, |
| "loss/hidden": 3.91796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.256628708448261, |
| "step": 2600 |
| }, |
| { |
| "epoch": 0.261, |
| "grad_norm": 41.5, |
| "grad_norm_var": 15.032291666666667, |
| "learning_rate": 0.0001, |
| "loss": 9.0016, |
| "loss/crossentropy": 2.365834577381611, |
| "loss/hidden": 3.85859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27286841757595537, |
| "step": 2610 |
| }, |
| { |
| "epoch": 0.262, |
| "grad_norm": 41.75, |
| "grad_norm_var": 9.50390625, |
| "learning_rate": 0.0001, |
| "loss": 8.8993, |
| "loss/crossentropy": 2.137889374792576, |
| "loss/hidden": 3.96171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27018810212612154, |
| "step": 2620 |
| }, |
| { |
| "epoch": 0.263, |
| "grad_norm": 44.75, |
| "grad_norm_var": 12.58515625, |
| "learning_rate": 0.0001, |
| "loss": 8.9333, |
| "loss/crossentropy": 2.279120808839798, |
| "loss/hidden": 3.8859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26760734505951406, |
| "step": 2630 |
| }, |
| { |
| "epoch": 0.264, |
| "grad_norm": 33.75, |
| "grad_norm_var": 20.205989583333334, |
| "learning_rate": 0.0001, |
| "loss": 8.998, |
| "loss/crossentropy": 2.133891487121582, |
| "loss/hidden": 3.978125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2548325901851058, |
| "step": 2640 |
| }, |
| { |
| "epoch": 0.265, |
| "grad_norm": 38.25, |
| "grad_norm_var": 24.26640625, |
| "learning_rate": 0.0001, |
| "loss": 8.8773, |
| "loss/crossentropy": 2.0680222399532795, |
| "loss/hidden": 3.983984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2616095909848809, |
| "step": 2650 |
| }, |
| { |
| "epoch": 0.266, |
| "grad_norm": 37.75, |
| "grad_norm_var": 24.65390625, |
| "learning_rate": 0.0001, |
| "loss": 8.8787, |
| "loss/crossentropy": 2.2427713751792906, |
| "loss/hidden": 3.976171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2840047996491194, |
| "step": 2660 |
| }, |
| { |
| "epoch": 0.267, |
| "grad_norm": 50.0, |
| "grad_norm_var": 22.190625, |
| "learning_rate": 0.0001, |
| "loss": 8.878, |
| "loss/crossentropy": 2.3332558259367944, |
| "loss/hidden": 3.774609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2527316328138113, |
| "step": 2670 |
| }, |
| { |
| "epoch": 0.268, |
| "grad_norm": 58.5, |
| "grad_norm_var": 53.95, |
| "learning_rate": 0.0001, |
| "loss": 8.8498, |
| "loss/crossentropy": 2.116853891313076, |
| "loss/hidden": 3.810546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24547674022614957, |
| "step": 2680 |
| }, |
| { |
| "epoch": 0.269, |
| "grad_norm": 37.5, |
| "grad_norm_var": 48.47057291666667, |
| "learning_rate": 0.0001, |
| "loss": 8.9221, |
| "loss/crossentropy": 2.468301197886467, |
| "loss/hidden": 3.809765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26750445999205114, |
| "step": 2690 |
| }, |
| { |
| "epoch": 0.27, |
| "grad_norm": 46.5, |
| "grad_norm_var": 16.812239583333334, |
| "learning_rate": 0.0001, |
| "loss": 8.7264, |
| "loss/crossentropy": 2.1068901009857655, |
| "loss/hidden": 3.970703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2827789710834622, |
| "step": 2700 |
| }, |
| { |
| "epoch": 0.271, |
| "grad_norm": 41.25, |
| "grad_norm_var": 17.765625, |
| "learning_rate": 0.0001, |
| "loss": 8.766, |
| "loss/crossentropy": 2.4922314494848252, |
| "loss/hidden": 3.943359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28248917534947393, |
| "step": 2710 |
| }, |
| { |
| "epoch": 0.272, |
| "grad_norm": 37.25, |
| "grad_norm_var": 4.524739583333333, |
| "learning_rate": 0.0001, |
| "loss": 8.8324, |
| "loss/crossentropy": 2.268562327325344, |
| "loss/hidden": 3.8296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.250153512135148, |
| "step": 2720 |
| }, |
| { |
| "epoch": 0.273, |
| "grad_norm": 38.5, |
| "grad_norm_var": 11.832291666666666, |
| "learning_rate": 0.0001, |
| "loss": 8.8538, |
| "loss/crossentropy": 2.171993290632963, |
| "loss/hidden": 3.860546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2605165271088481, |
| "step": 2730 |
| }, |
| { |
| "epoch": 0.274, |
| "grad_norm": 37.25, |
| "grad_norm_var": 17.5625, |
| "learning_rate": 0.0001, |
| "loss": 8.8625, |
| "loss/crossentropy": 2.3496526792645454, |
| "loss/hidden": 3.78828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2532901844009757, |
| "step": 2740 |
| }, |
| { |
| "epoch": 0.275, |
| "grad_norm": 38.5, |
| "grad_norm_var": 18.69140625, |
| "learning_rate": 0.0001, |
| "loss": 8.8941, |
| "loss/crossentropy": 2.377558296918869, |
| "loss/hidden": 3.844921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2773334577679634, |
| "step": 2750 |
| }, |
| { |
| "epoch": 0.276, |
| "grad_norm": 46.75, |
| "grad_norm_var": 22.22890625, |
| "learning_rate": 0.0001, |
| "loss": 8.7839, |
| "loss/crossentropy": 2.248410400748253, |
| "loss/hidden": 3.980078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27028046883642676, |
| "step": 2760 |
| }, |
| { |
| "epoch": 0.277, |
| "grad_norm": 38.75, |
| "grad_norm_var": 23.164322916666666, |
| "learning_rate": 0.0001, |
| "loss": 8.7791, |
| "loss/crossentropy": 2.4241216853260994, |
| "loss/hidden": 3.8984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2659046190790832, |
| "step": 2770 |
| }, |
| { |
| "epoch": 0.278, |
| "grad_norm": 53.25, |
| "grad_norm_var": 59.78098958333333, |
| "learning_rate": 0.0001, |
| "loss": 8.8019, |
| "loss/crossentropy": 2.1859550148248674, |
| "loss/hidden": 3.828515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25679499059915545, |
| "step": 2780 |
| }, |
| { |
| "epoch": 0.279, |
| "grad_norm": 41.5, |
| "grad_norm_var": 62.925455729166664, |
| "learning_rate": 0.0001, |
| "loss": 8.8086, |
| "loss/crossentropy": 2.312130589783192, |
| "loss/hidden": 3.91171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2820352425798774, |
| "step": 2790 |
| }, |
| { |
| "epoch": 0.28, |
| "grad_norm": 46.75, |
| "grad_norm_var": 18.951822916666668, |
| "learning_rate": 0.0001, |
| "loss": 8.7941, |
| "loss/crossentropy": 2.258910335600376, |
| "loss/hidden": 3.902734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2627917256206274, |
| "step": 2800 |
| }, |
| { |
| "epoch": 0.281, |
| "grad_norm": 40.25, |
| "grad_norm_var": 17.639322916666668, |
| "learning_rate": 0.0001, |
| "loss": 8.7417, |
| "loss/crossentropy": 2.2589244581758976, |
| "loss/hidden": 3.753515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2513848140835762, |
| "step": 2810 |
| }, |
| { |
| "epoch": 0.282, |
| "grad_norm": 40.5, |
| "grad_norm_var": 12.032291666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.8627, |
| "loss/crossentropy": 2.17684805393219, |
| "loss/hidden": 3.853125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.255695578455925, |
| "step": 2820 |
| }, |
| { |
| "epoch": 0.283, |
| "grad_norm": 40.75, |
| "grad_norm_var": 114.61432291666667, |
| "learning_rate": 0.0001, |
| "loss": 8.8412, |
| "loss/crossentropy": 2.2954238772392275, |
| "loss/hidden": 3.918359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2858715243637562, |
| "step": 2830 |
| }, |
| { |
| "epoch": 0.284, |
| "grad_norm": 40.5, |
| "grad_norm_var": 5.641666666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.9391, |
| "loss/crossentropy": 2.329847712814808, |
| "loss/hidden": 3.7625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25534543097019197, |
| "step": 2840 |
| }, |
| { |
| "epoch": 0.285, |
| "grad_norm": 38.5, |
| "grad_norm_var": 40.84557291666667, |
| "learning_rate": 0.0001, |
| "loss": 8.7803, |
| "loss/crossentropy": 2.2853938594460486, |
| "loss/hidden": 3.866796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24832999743521214, |
| "step": 2850 |
| }, |
| { |
| "epoch": 0.286, |
| "grad_norm": 36.75, |
| "grad_norm_var": 39.958072916666666, |
| "learning_rate": 0.0001, |
| "loss": 8.6796, |
| "loss/crossentropy": 2.234159553050995, |
| "loss/hidden": 3.847265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26201403168961407, |
| "step": 2860 |
| }, |
| { |
| "epoch": 0.287, |
| "grad_norm": 39.0, |
| "grad_norm_var": 9.457291666666666, |
| "learning_rate": 0.0001, |
| "loss": 8.7109, |
| "loss/crossentropy": 2.2819659531116487, |
| "loss/hidden": 3.894140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26489345021545885, |
| "step": 2870 |
| }, |
| { |
| "epoch": 0.288, |
| "grad_norm": 37.25, |
| "grad_norm_var": 9.268489583333333, |
| "learning_rate": 0.0001, |
| "loss": 8.828, |
| "loss/crossentropy": 2.4358034074306487, |
| "loss/hidden": 3.81328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27083273865282537, |
| "step": 2880 |
| }, |
| { |
| "epoch": 0.289, |
| "grad_norm": 42.0, |
| "grad_norm_var": 12.805989583333334, |
| "learning_rate": 0.0001, |
| "loss": 8.8211, |
| "loss/crossentropy": 2.2965500839054585, |
| "loss/hidden": 3.869921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26719480073079466, |
| "step": 2890 |
| }, |
| { |
| "epoch": 0.29, |
| "grad_norm": 37.25, |
| "grad_norm_var": 9.020572916666667, |
| "learning_rate": 0.0001, |
| "loss": 8.7927, |
| "loss/crossentropy": 2.3898714184761047, |
| "loss/hidden": 3.930078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2959909211844206, |
| "step": 2900 |
| }, |
| { |
| "epoch": 0.291, |
| "grad_norm": 44.25, |
| "grad_norm_var": 10.283333333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.7893, |
| "loss/crossentropy": 2.349339473247528, |
| "loss/hidden": 3.7953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25724136754870414, |
| "step": 2910 |
| }, |
| { |
| "epoch": 0.292, |
| "grad_norm": 35.5, |
| "grad_norm_var": 17.418489583333333, |
| "learning_rate": 0.0001, |
| "loss": 8.7582, |
| "loss/crossentropy": 2.1329104267060757, |
| "loss/hidden": 3.83046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2572308249771595, |
| "step": 2920 |
| }, |
| { |
| "epoch": 0.293, |
| "grad_norm": 42.0, |
| "grad_norm_var": 14.87265625, |
| "learning_rate": 0.0001, |
| "loss": 8.6388, |
| "loss/crossentropy": 2.23364320397377, |
| "loss/hidden": 3.82421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2617119399830699, |
| "step": 2930 |
| }, |
| { |
| "epoch": 0.294, |
| "grad_norm": 42.75, |
| "grad_norm_var": 8.204166666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.6531, |
| "loss/crossentropy": 2.1770762100815775, |
| "loss/hidden": 3.833984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2418960839509964, |
| "step": 2940 |
| }, |
| { |
| "epoch": 0.295, |
| "grad_norm": 45.5, |
| "grad_norm_var": 16.265625, |
| "learning_rate": 0.0001, |
| "loss": 8.8053, |
| "loss/crossentropy": 2.18697277456522, |
| "loss/hidden": 3.78125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26141371857374907, |
| "step": 2950 |
| }, |
| { |
| "epoch": 0.296, |
| "grad_norm": 32.25, |
| "grad_norm_var": 19.145833333333332, |
| "learning_rate": 0.0001, |
| "loss": 8.7178, |
| "loss/crossentropy": 2.303604170680046, |
| "loss/hidden": 3.738671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2531938493251801, |
| "step": 2960 |
| }, |
| { |
| "epoch": 0.297, |
| "grad_norm": 33.5, |
| "grad_norm_var": 10.88515625, |
| "learning_rate": 0.0001, |
| "loss": 8.5781, |
| "loss/crossentropy": 2.112999178469181, |
| "loss/hidden": 3.853515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2678428884595633, |
| "step": 2970 |
| }, |
| { |
| "epoch": 0.298, |
| "grad_norm": 44.0, |
| "grad_norm_var": 32.84973958333333, |
| "learning_rate": 0.0001, |
| "loss": 8.8415, |
| "loss/crossentropy": 2.407758575677872, |
| "loss/hidden": 3.925, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3035837195813656, |
| "step": 2980 |
| }, |
| { |
| "epoch": 0.299, |
| "grad_norm": 36.5, |
| "grad_norm_var": 21.0125, |
| "learning_rate": 0.0001, |
| "loss": 8.6496, |
| "loss/crossentropy": 2.1467753663659095, |
| "loss/hidden": 3.802734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25200750436633823, |
| "step": 2990 |
| }, |
| { |
| "epoch": 0.3, |
| "grad_norm": 37.25, |
| "grad_norm_var": 102.01848958333333, |
| "learning_rate": 0.0001, |
| "loss": 8.5981, |
| "loss/crossentropy": 2.22710300385952, |
| "loss/hidden": 3.741015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24842255041003228, |
| "step": 3000 |
| }, |
| { |
| "epoch": 0.301, |
| "grad_norm": 37.5, |
| "grad_norm_var": 20.633072916666666, |
| "learning_rate": 0.0001, |
| "loss": 8.6443, |
| "loss/crossentropy": 2.3478307321667673, |
| "loss/hidden": 3.805859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25510224178433416, |
| "step": 3010 |
| }, |
| { |
| "epoch": 0.302, |
| "grad_norm": 41.5, |
| "grad_norm_var": 8.864322916666667, |
| "learning_rate": 0.0001, |
| "loss": 8.7802, |
| "loss/crossentropy": 2.3206353336572647, |
| "loss/hidden": 3.855078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2652175173163414, |
| "step": 3020 |
| }, |
| { |
| "epoch": 0.303, |
| "grad_norm": 46.25, |
| "grad_norm_var": 1.6257994395224812e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.8456, |
| "loss/crossentropy": 2.3741259276866913, |
| "loss/hidden": 3.973046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2950541414320469, |
| "step": 3030 |
| }, |
| { |
| "epoch": 0.304, |
| "grad_norm": 38.25, |
| "grad_norm_var": 1.6257994392887188e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.7725, |
| "loss/crossentropy": 2.413549691438675, |
| "loss/hidden": 3.87734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26055113933980467, |
| "step": 3040 |
| }, |
| { |
| "epoch": 0.305, |
| "grad_norm": 46.0, |
| "grad_norm_var": 297.95807291666665, |
| "learning_rate": 0.0001, |
| "loss": 8.66, |
| "loss/crossentropy": 2.3086455732584, |
| "loss/hidden": 3.956640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2837849177420139, |
| "step": 3050 |
| }, |
| { |
| "epoch": 0.306, |
| "grad_norm": 38.75, |
| "grad_norm_var": 326.315625, |
| "learning_rate": 0.0001, |
| "loss": 8.7568, |
| "loss/crossentropy": 2.3664418935775755, |
| "loss/hidden": 3.912890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2698039198294282, |
| "step": 3060 |
| }, |
| { |
| "epoch": 0.307, |
| "grad_norm": 36.5, |
| "grad_norm_var": 21.515625, |
| "learning_rate": 0.0001, |
| "loss": 8.7201, |
| "loss/crossentropy": 2.1954890489578247, |
| "loss/hidden": 3.8921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2684766609221697, |
| "step": 3070 |
| }, |
| { |
| "epoch": 0.308, |
| "grad_norm": 41.0, |
| "grad_norm_var": 7.140625, |
| "learning_rate": 0.0001, |
| "loss": 8.8511, |
| "loss/crossentropy": 2.310048124939203, |
| "loss/hidden": 3.84609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2778544146567583, |
| "step": 3080 |
| }, |
| { |
| "epoch": 0.309, |
| "grad_norm": 35.25, |
| "grad_norm_var": 10.1875, |
| "learning_rate": 0.0001, |
| "loss": 8.7144, |
| "loss/crossentropy": 2.3563184320926664, |
| "loss/hidden": 3.8625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2816514492034912, |
| "step": 3090 |
| }, |
| { |
| "epoch": 0.31, |
| "grad_norm": 36.75, |
| "grad_norm_var": 2.279947281849385e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.7129, |
| "loss/crossentropy": 2.439156624674797, |
| "loss/hidden": 3.85078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28767146319150927, |
| "step": 3100 |
| }, |
| { |
| "epoch": 0.311, |
| "grad_norm": 39.5, |
| "grad_norm_var": 2.2799472807295058e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.6894, |
| "loss/crossentropy": 2.176630274951458, |
| "loss/hidden": 3.76484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25743562281131743, |
| "step": 3110 |
| }, |
| { |
| "epoch": 0.312, |
| "grad_norm": 36.0, |
| "grad_norm_var": 42.69348958333333, |
| "learning_rate": 0.0001, |
| "loss": 8.6887, |
| "loss/crossentropy": 2.2695231288671494, |
| "loss/hidden": 3.796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2609828669577837, |
| "step": 3120 |
| }, |
| { |
| "epoch": 0.313, |
| "grad_norm": 38.75, |
| "grad_norm_var": 12.6875, |
| "learning_rate": 0.0001, |
| "loss": 8.7316, |
| "loss/crossentropy": 2.2914074435830116, |
| "loss/hidden": 3.970703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25941712930798533, |
| "step": 3130 |
| }, |
| { |
| "epoch": 0.314, |
| "grad_norm": 35.5, |
| "grad_norm_var": 9.191666666666666, |
| "learning_rate": 0.0001, |
| "loss": 8.675, |
| "loss/crossentropy": 2.2625655576586725, |
| "loss/hidden": 3.950390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26395085640251637, |
| "step": 3140 |
| }, |
| { |
| "epoch": 0.315, |
| "grad_norm": 48.25, |
| "grad_norm_var": 64.8125, |
| "learning_rate": 0.0001, |
| "loss": 8.8068, |
| "loss/crossentropy": 2.157360579818487, |
| "loss/hidden": 3.814453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2572598461061716, |
| "step": 3150 |
| }, |
| { |
| "epoch": 0.316, |
| "grad_norm": 36.0, |
| "grad_norm_var": 69.0212890625, |
| "learning_rate": 0.0001, |
| "loss": 8.656, |
| "loss/crossentropy": 2.24196752011776, |
| "loss/hidden": 3.821875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27792793661355975, |
| "step": 3160 |
| }, |
| { |
| "epoch": 0.317, |
| "grad_norm": 35.0, |
| "grad_norm_var": 8.108333333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.662, |
| "loss/crossentropy": 2.2520006895065308, |
| "loss/hidden": 3.708203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2597096076235175, |
| "step": 3170 |
| }, |
| { |
| "epoch": 0.318, |
| "grad_norm": 34.0, |
| "grad_norm_var": 12.848958333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.5281, |
| "loss/crossentropy": 2.2527818381786346, |
| "loss/hidden": 3.772265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2495524413883686, |
| "step": 3180 |
| }, |
| { |
| "epoch": 0.319, |
| "grad_norm": 41.0, |
| "grad_norm_var": 51.365625, |
| "learning_rate": 0.0001, |
| "loss": 8.7132, |
| "loss/crossentropy": 2.2904104314744473, |
| "loss/hidden": 3.944140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2817814026027918, |
| "step": 3190 |
| }, |
| { |
| "epoch": 0.32, |
| "grad_norm": 36.25, |
| "grad_norm_var": 22.22265625, |
| "learning_rate": 0.0001, |
| "loss": 8.4574, |
| "loss/crossentropy": 2.090634661912918, |
| "loss/hidden": 3.79921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24919861294329165, |
| "step": 3200 |
| }, |
| { |
| "epoch": 0.321, |
| "grad_norm": 38.5, |
| "grad_norm_var": 10.030989583333334, |
| "learning_rate": 0.0001, |
| "loss": 8.5723, |
| "loss/crossentropy": 2.117210125923157, |
| "loss/hidden": 3.672265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2293582683429122, |
| "step": 3210 |
| }, |
| { |
| "epoch": 0.322, |
| "grad_norm": 36.25, |
| "grad_norm_var": 17.933072916666667, |
| "learning_rate": 0.0001, |
| "loss": 8.5557, |
| "loss/crossentropy": 2.252604177594185, |
| "loss/hidden": 3.71171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2455908928066492, |
| "step": 3220 |
| }, |
| { |
| "epoch": 0.323, |
| "grad_norm": 34.0, |
| "grad_norm_var": 15.673958333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.6303, |
| "loss/crossentropy": 2.2780065298080445, |
| "loss/hidden": 3.701953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25410398468375206, |
| "step": 3230 |
| }, |
| { |
| "epoch": 0.324, |
| "grad_norm": 37.75, |
| "grad_norm_var": 31.186393229166665, |
| "learning_rate": 0.0001, |
| "loss": 8.6026, |
| "loss/crossentropy": 2.3745017647743225, |
| "loss/hidden": 3.819140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2740194508805871, |
| "step": 3240 |
| }, |
| { |
| "epoch": 0.325, |
| "grad_norm": 44.75, |
| "grad_norm_var": 16.865625, |
| "learning_rate": 0.0001, |
| "loss": 8.7984, |
| "loss/crossentropy": 2.3756151482462884, |
| "loss/hidden": 3.894921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2671732004731894, |
| "step": 3250 |
| }, |
| { |
| "epoch": 0.326, |
| "grad_norm": 44.0, |
| "grad_norm_var": 18.073893229166668, |
| "learning_rate": 0.0001, |
| "loss": 8.6327, |
| "loss/crossentropy": 2.3531317353248595, |
| "loss/hidden": 3.717578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25254391208291055, |
| "step": 3260 |
| }, |
| { |
| "epoch": 0.327, |
| "grad_norm": 38.5, |
| "grad_norm_var": 10.248893229166667, |
| "learning_rate": 0.0001, |
| "loss": 8.6167, |
| "loss/crossentropy": 2.283932936191559, |
| "loss/hidden": 3.96171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.28510861806571486, |
| "step": 3270 |
| }, |
| { |
| "epoch": 0.328, |
| "grad_norm": 32.25, |
| "grad_norm_var": 7.364322916666667, |
| "learning_rate": 0.0001, |
| "loss": 8.6054, |
| "loss/crossentropy": 2.1746396124362946, |
| "loss/hidden": 3.709375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24525153413414955, |
| "step": 3280 |
| }, |
| { |
| "epoch": 0.329, |
| "grad_norm": 41.25, |
| "grad_norm_var": 18.978059895833333, |
| "learning_rate": 0.0001, |
| "loss": 8.8527, |
| "loss/crossentropy": 2.270473413169384, |
| "loss/hidden": 3.8609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.274440161883831, |
| "step": 3290 |
| }, |
| { |
| "epoch": 0.33, |
| "grad_norm": 48.5, |
| "grad_norm_var": 18.778580729166666, |
| "learning_rate": 0.0001, |
| "loss": 8.6327, |
| "loss/crossentropy": 2.3394594848155976, |
| "loss/hidden": 3.77109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25994330234825613, |
| "step": 3300 |
| }, |
| { |
| "epoch": 0.331, |
| "grad_norm": 34.75, |
| "grad_norm_var": 17.190625, |
| "learning_rate": 0.0001, |
| "loss": 8.7313, |
| "loss/crossentropy": 2.347683647274971, |
| "loss/hidden": 3.894921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27402915358543395, |
| "step": 3310 |
| }, |
| { |
| "epoch": 0.332, |
| "grad_norm": 31.0, |
| "grad_norm_var": 27.712239583333332, |
| "learning_rate": 0.0001, |
| "loss": 8.7637, |
| "loss/crossentropy": 2.257617971301079, |
| "loss/hidden": 3.88046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2689027152955532, |
| "step": 3320 |
| }, |
| { |
| "epoch": 0.333, |
| "grad_norm": 40.0, |
| "grad_norm_var": 27.319205729166665, |
| "learning_rate": 0.0001, |
| "loss": 8.829, |
| "loss/crossentropy": 2.2091607600450516, |
| "loss/hidden": 3.83515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2670388799160719, |
| "step": 3330 |
| }, |
| { |
| "epoch": 0.334, |
| "grad_norm": 37.5, |
| "grad_norm_var": 38.63098958333333, |
| "learning_rate": 0.0001, |
| "loss": 8.759, |
| "loss/crossentropy": 2.410393309593201, |
| "loss/hidden": 3.73359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2507057674229145, |
| "step": 3340 |
| }, |
| { |
| "epoch": 0.335, |
| "grad_norm": 35.25, |
| "grad_norm_var": 9.292122395833333, |
| "learning_rate": 0.0001, |
| "loss": 8.6462, |
| "loss/crossentropy": 2.299106788635254, |
| "loss/hidden": 3.76328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2520503532141447, |
| "step": 3350 |
| }, |
| { |
| "epoch": 0.336, |
| "grad_norm": 44.25, |
| "grad_norm_var": 13.2869140625, |
| "learning_rate": 0.0001, |
| "loss": 8.6432, |
| "loss/crossentropy": 2.2736548662185667, |
| "loss/hidden": 3.81796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2679216586053371, |
| "step": 3360 |
| }, |
| { |
| "epoch": 0.337, |
| "grad_norm": 40.25, |
| "grad_norm_var": 16.704622395833333, |
| "learning_rate": 0.0001, |
| "loss": 8.6969, |
| "loss/crossentropy": 2.300890862941742, |
| "loss/hidden": 3.85703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27277763597667215, |
| "step": 3370 |
| }, |
| { |
| "epoch": 0.338, |
| "grad_norm": 36.25, |
| "grad_norm_var": 7.362239583333333, |
| "learning_rate": 0.0001, |
| "loss": 8.6139, |
| "loss/crossentropy": 2.2713751554489137, |
| "loss/hidden": 3.739453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2698351971805096, |
| "step": 3380 |
| }, |
| { |
| "epoch": 0.339, |
| "grad_norm": 35.5, |
| "grad_norm_var": 7.383333333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.6652, |
| "loss/crossentropy": 2.154055279493332, |
| "loss/hidden": 3.784375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2516574438661337, |
| "step": 3390 |
| }, |
| { |
| "epoch": 0.34, |
| "grad_norm": 46.5, |
| "grad_norm_var": 231.42083333333332, |
| "learning_rate": 0.0001, |
| "loss": 8.7521, |
| "loss/crossentropy": 2.1560363829135896, |
| "loss/hidden": 3.78828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2552214227616787, |
| "step": 3400 |
| }, |
| { |
| "epoch": 0.341, |
| "grad_norm": 34.0, |
| "grad_norm_var": 16.01015625, |
| "learning_rate": 0.0001, |
| "loss": 8.6719, |
| "loss/crossentropy": 2.210598033666611, |
| "loss/hidden": 3.7078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24775836057960987, |
| "step": 3410 |
| }, |
| { |
| "epoch": 0.342, |
| "grad_norm": 46.75, |
| "grad_norm_var": 23.555989583333332, |
| "learning_rate": 0.0001, |
| "loss": 8.732, |
| "loss/crossentropy": 2.0089349642395975, |
| "loss/hidden": 3.95625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24481147788465024, |
| "step": 3420 |
| }, |
| { |
| "epoch": 0.343, |
| "grad_norm": 43.5, |
| "grad_norm_var": 24.71015625, |
| "learning_rate": 0.0001, |
| "loss": 8.5835, |
| "loss/crossentropy": 2.167103961110115, |
| "loss/hidden": 3.73046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23311931267380714, |
| "step": 3430 |
| }, |
| { |
| "epoch": 0.344, |
| "grad_norm": 38.75, |
| "grad_norm_var": 16.239322916666666, |
| "learning_rate": 0.0001, |
| "loss": 8.7327, |
| "loss/crossentropy": 2.223423732817173, |
| "loss/hidden": 3.916796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27787868976593016, |
| "step": 3440 |
| }, |
| { |
| "epoch": 0.345, |
| "grad_norm": 32.5, |
| "grad_norm_var": 12.4166015625, |
| "learning_rate": 0.0001, |
| "loss": 8.6674, |
| "loss/crossentropy": 2.190432313084602, |
| "loss/hidden": 3.933203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2527661222964525, |
| "step": 3450 |
| }, |
| { |
| "epoch": 0.346, |
| "grad_norm": 38.5, |
| "grad_norm_var": 239.47337239583334, |
| "learning_rate": 0.0001, |
| "loss": 8.678, |
| "loss/crossentropy": 2.121590741723776, |
| "loss/hidden": 3.746875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22831694399937988, |
| "step": 3460 |
| }, |
| { |
| "epoch": 0.347, |
| "grad_norm": 32.5, |
| "grad_norm_var": 24.29765625, |
| "learning_rate": 0.0001, |
| "loss": 8.5703, |
| "loss/crossentropy": 2.238359749317169, |
| "loss/hidden": 3.776171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2545921359211206, |
| "step": 3470 |
| }, |
| { |
| "epoch": 0.348, |
| "grad_norm": 31.375, |
| "grad_norm_var": 57.77291666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.667, |
| "loss/crossentropy": 2.183069321513176, |
| "loss/hidden": 3.723828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26235801838338374, |
| "step": 3480 |
| }, |
| { |
| "epoch": 0.349, |
| "grad_norm": 37.25, |
| "grad_norm_var": 47.95104166666667, |
| "learning_rate": 0.0001, |
| "loss": 8.6657, |
| "loss/crossentropy": 2.294294211268425, |
| "loss/hidden": 3.7875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2434792961925268, |
| "step": 3490 |
| }, |
| { |
| "epoch": 0.35, |
| "grad_norm": 55.75, |
| "grad_norm_var": 32.8931640625, |
| "learning_rate": 0.0001, |
| "loss": 8.6293, |
| "loss/crossentropy": 2.2934862852096556, |
| "loss/hidden": 3.7671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2526032764464617, |
| "step": 3500 |
| }, |
| { |
| "epoch": 0.351, |
| "grad_norm": 41.75, |
| "grad_norm_var": 37.820247395833334, |
| "learning_rate": 0.0001, |
| "loss": 8.6558, |
| "loss/crossentropy": 2.240943320095539, |
| "loss/hidden": 3.798828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2533996310085058, |
| "step": 3510 |
| }, |
| { |
| "epoch": 0.352, |
| "grad_norm": 34.0, |
| "grad_norm_var": 19.158333333333335, |
| "learning_rate": 0.0001, |
| "loss": 8.7265, |
| "loss/crossentropy": 2.2778089314699175, |
| "loss/hidden": 3.74765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24110115878283978, |
| "step": 3520 |
| }, |
| { |
| "epoch": 0.353, |
| "grad_norm": 33.75, |
| "grad_norm_var": 14.61640625, |
| "learning_rate": 0.0001, |
| "loss": 8.6184, |
| "loss/crossentropy": 2.2676281452178957, |
| "loss/hidden": 3.703515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25084604155272244, |
| "step": 3530 |
| }, |
| { |
| "epoch": 0.354, |
| "grad_norm": 41.25, |
| "grad_norm_var": 11.601822916666666, |
| "learning_rate": 0.0001, |
| "loss": 8.6994, |
| "loss/crossentropy": 2.291515235602856, |
| "loss/hidden": 3.919140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.29151172675192355, |
| "step": 3540 |
| }, |
| { |
| "epoch": 0.355, |
| "grad_norm": 35.75, |
| "grad_norm_var": 7.620833333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.606, |
| "loss/crossentropy": 2.2359238654375075, |
| "loss/hidden": 3.81484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25299829691648484, |
| "step": 3550 |
| }, |
| { |
| "epoch": 0.356, |
| "grad_norm": 35.25, |
| "grad_norm_var": 7.1, |
| "learning_rate": 0.0001, |
| "loss": 8.5051, |
| "loss/crossentropy": 2.2826671570539476, |
| "loss/hidden": 3.843359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26029736921191216, |
| "step": 3560 |
| }, |
| { |
| "epoch": 0.357, |
| "grad_norm": 38.5, |
| "grad_norm_var": 7.422330729166666, |
| "learning_rate": 0.0001, |
| "loss": 8.5601, |
| "loss/crossentropy": 2.29564026594162, |
| "loss/hidden": 3.790234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24953206069767475, |
| "step": 3570 |
| }, |
| { |
| "epoch": 0.358, |
| "grad_norm": 34.25, |
| "grad_norm_var": 9.414518229166667, |
| "learning_rate": 0.0001, |
| "loss": 8.677, |
| "loss/crossentropy": 2.360885411500931, |
| "loss/hidden": 3.812890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2608415879309177, |
| "step": 3580 |
| }, |
| { |
| "epoch": 0.359, |
| "grad_norm": 41.75, |
| "grad_norm_var": 7.558333333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.591, |
| "loss/crossentropy": 2.0790357582271097, |
| "loss/hidden": 3.759375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23188311588019134, |
| "step": 3590 |
| }, |
| { |
| "epoch": 0.36, |
| "grad_norm": 38.0, |
| "grad_norm_var": 10.890625, |
| "learning_rate": 0.0001, |
| "loss": 8.6616, |
| "loss/crossentropy": 2.289612150192261, |
| "loss/hidden": 3.878125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2747206833213568, |
| "step": 3600 |
| }, |
| { |
| "epoch": 0.361, |
| "grad_norm": 42.0, |
| "grad_norm_var": 14.145572916666667, |
| "learning_rate": 0.0001, |
| "loss": 8.589, |
| "loss/crossentropy": 2.132586442679167, |
| "loss/hidden": 3.811328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25861772149801254, |
| "step": 3610 |
| }, |
| { |
| "epoch": 0.362, |
| "grad_norm": 35.0, |
| "grad_norm_var": 11.537239583333333, |
| "learning_rate": 0.0001, |
| "loss": 8.6171, |
| "loss/crossentropy": 2.3269239187240602, |
| "loss/hidden": 3.8265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2712526451796293, |
| "step": 3620 |
| }, |
| { |
| "epoch": 0.363, |
| "grad_norm": 33.75, |
| "grad_norm_var": 20.012239583333333, |
| "learning_rate": 0.0001, |
| "loss": 8.5134, |
| "loss/crossentropy": 2.326130175590515, |
| "loss/hidden": 3.791796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24642233476042746, |
| "step": 3630 |
| }, |
| { |
| "epoch": 0.364, |
| "grad_norm": 31.875, |
| "grad_norm_var": 30.459830729166665, |
| "learning_rate": 0.0001, |
| "loss": 8.5808, |
| "loss/crossentropy": 2.30035699903965, |
| "loss/hidden": 3.733984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26470062173902986, |
| "step": 3640 |
| }, |
| { |
| "epoch": 0.365, |
| "grad_norm": 37.25, |
| "grad_norm_var": 8.626041666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.5614, |
| "loss/crossentropy": 2.3303778156638146, |
| "loss/hidden": 3.871875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27759894989430905, |
| "step": 3650 |
| }, |
| { |
| "epoch": 0.366, |
| "grad_norm": 33.0, |
| "grad_norm_var": 5.374739583333334, |
| "learning_rate": 0.0001, |
| "loss": 8.5104, |
| "loss/crossentropy": 2.3886318862438203, |
| "loss/hidden": 3.801953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26098744831979276, |
| "step": 3660 |
| }, |
| { |
| "epoch": 0.367, |
| "grad_norm": 28.875, |
| "grad_norm_var": 18.685416666666665, |
| "learning_rate": 0.0001, |
| "loss": 8.5148, |
| "loss/crossentropy": 2.3471581265330315, |
| "loss/hidden": 3.760546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.258730498701334, |
| "step": 3670 |
| }, |
| { |
| "epoch": 0.368, |
| "grad_norm": 48.5, |
| "grad_norm_var": 36.4744140625, |
| "learning_rate": 0.0001, |
| "loss": 8.5484, |
| "loss/crossentropy": 2.1698090970516204, |
| "loss/hidden": 3.70078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24077225737273694, |
| "step": 3680 |
| }, |
| { |
| "epoch": 0.369, |
| "grad_norm": 30.125, |
| "grad_norm_var": 25.111393229166666, |
| "learning_rate": 0.0001, |
| "loss": 8.5671, |
| "loss/crossentropy": 2.1866169169545175, |
| "loss/hidden": 3.8203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24500060379505156, |
| "step": 3690 |
| }, |
| { |
| "epoch": 0.37, |
| "grad_norm": 30.375, |
| "grad_norm_var": 2.130484072425508e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.6635, |
| "loss/crossentropy": 2.453831446170807, |
| "loss/hidden": 4.06171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2722825076431036, |
| "step": 3700 |
| }, |
| { |
| "epoch": 0.371, |
| "grad_norm": 32.5, |
| "grad_norm_var": 32.78795572916667, |
| "learning_rate": 0.0001, |
| "loss": 8.5788, |
| "loss/crossentropy": 2.212860561162233, |
| "loss/hidden": 3.629296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21873269081115723, |
| "step": 3710 |
| }, |
| { |
| "epoch": 0.372, |
| "grad_norm": 29.625, |
| "grad_norm_var": 31.475455729166665, |
| "learning_rate": 0.0001, |
| "loss": 8.5924, |
| "loss/crossentropy": 2.3226836264133452, |
| "loss/hidden": 3.711328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2526125326752663, |
| "step": 3720 |
| }, |
| { |
| "epoch": 0.373, |
| "grad_norm": 40.25, |
| "grad_norm_var": 74.74680989583334, |
| "learning_rate": 0.0001, |
| "loss": 8.7309, |
| "loss/crossentropy": 2.211090712249279, |
| "loss/hidden": 3.85078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2619786085560918, |
| "step": 3730 |
| }, |
| { |
| "epoch": 0.374, |
| "grad_norm": 30.5, |
| "grad_norm_var": 49.24108072916667, |
| "learning_rate": 0.0001, |
| "loss": 8.4683, |
| "loss/crossentropy": 2.117314028739929, |
| "loss/hidden": 3.787890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24047958478331566, |
| "step": 3740 |
| }, |
| { |
| "epoch": 0.375, |
| "grad_norm": 43.0, |
| "grad_norm_var": 13.481184895833334, |
| "learning_rate": 0.0001, |
| "loss": 8.5891, |
| "loss/crossentropy": 2.3331554174423217, |
| "loss/hidden": 3.737109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.265634342469275, |
| "step": 3750 |
| }, |
| { |
| "epoch": 0.376, |
| "grad_norm": 70.0, |
| "grad_norm_var": 86.09837239583334, |
| "learning_rate": 0.0001, |
| "loss": 8.4671, |
| "loss/crossentropy": 2.279883709549904, |
| "loss/hidden": 3.764453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2557247843593359, |
| "step": 3760 |
| }, |
| { |
| "epoch": 0.377, |
| "grad_norm": 37.5, |
| "grad_norm_var": 101.2072265625, |
| "learning_rate": 0.0001, |
| "loss": 8.6263, |
| "loss/crossentropy": 2.119837614893913, |
| "loss/hidden": 3.7328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24443967882543802, |
| "step": 3770 |
| }, |
| { |
| "epoch": 0.378, |
| "grad_norm": 36.0, |
| "grad_norm_var": 11.41015625, |
| "learning_rate": 0.0001, |
| "loss": 8.573, |
| "loss/crossentropy": 2.2743908286094667, |
| "loss/hidden": 3.7046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24655351527035235, |
| "step": 3780 |
| }, |
| { |
| "epoch": 0.379, |
| "grad_norm": 30.5, |
| "grad_norm_var": 22.8337890625, |
| "learning_rate": 0.0001, |
| "loss": 8.5496, |
| "loss/crossentropy": 2.298393335938454, |
| "loss/hidden": 3.661328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2468328095972538, |
| "step": 3790 |
| }, |
| { |
| "epoch": 0.38, |
| "grad_norm": 36.75, |
| "grad_norm_var": 21.886393229166668, |
| "learning_rate": 0.0001, |
| "loss": 8.4728, |
| "loss/crossentropy": 2.285892593860626, |
| "loss/hidden": 3.696484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22759304326027632, |
| "step": 3800 |
| }, |
| { |
| "epoch": 0.381, |
| "grad_norm": 37.0, |
| "grad_norm_var": 25.983268229166665, |
| "learning_rate": 0.0001, |
| "loss": 8.4654, |
| "loss/crossentropy": 2.200984264165163, |
| "loss/hidden": 3.760546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2451606505550444, |
| "step": 3810 |
| }, |
| { |
| "epoch": 0.382, |
| "grad_norm": 37.5, |
| "grad_norm_var": 13.956705729166666, |
| "learning_rate": 0.0001, |
| "loss": 8.5469, |
| "loss/crossentropy": 2.2647089801728724, |
| "loss/hidden": 3.803125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2533489029854536, |
| "step": 3820 |
| }, |
| { |
| "epoch": 0.383, |
| "grad_norm": 34.75, |
| "grad_norm_var": 7.221809895833333, |
| "learning_rate": 0.0001, |
| "loss": 8.579, |
| "loss/crossentropy": 2.386853316426277, |
| "loss/hidden": 3.795703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2518702711910009, |
| "step": 3830 |
| }, |
| { |
| "epoch": 0.384, |
| "grad_norm": 42.0, |
| "grad_norm_var": 15.8650390625, |
| "learning_rate": 0.0001, |
| "loss": 8.4816, |
| "loss/crossentropy": 2.3519598811864855, |
| "loss/hidden": 3.75625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.253318839520216, |
| "step": 3840 |
| }, |
| { |
| "epoch": 0.385, |
| "grad_norm": 38.25, |
| "grad_norm_var": 14.728580729166667, |
| "learning_rate": 0.0001, |
| "loss": 8.7292, |
| "loss/crossentropy": 2.164312995970249, |
| "loss/hidden": 3.8234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24283661209046842, |
| "step": 3850 |
| }, |
| { |
| "epoch": 0.386, |
| "grad_norm": 37.75, |
| "grad_norm_var": 14.876822916666667, |
| "learning_rate": 0.0001, |
| "loss": 8.6236, |
| "loss/crossentropy": 2.1916888520121574, |
| "loss/hidden": 3.69140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23480207994580268, |
| "step": 3860 |
| }, |
| { |
| "epoch": 0.387, |
| "grad_norm": 33.25, |
| "grad_norm_var": 17.855208333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.4472, |
| "loss/crossentropy": 2.2794968456029894, |
| "loss/hidden": 3.759375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2547815594822168, |
| "step": 3870 |
| }, |
| { |
| "epoch": 0.388, |
| "grad_norm": 39.75, |
| "grad_norm_var": 10.063541666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.5547, |
| "loss/crossentropy": 2.2261194586753845, |
| "loss/hidden": 3.73203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24962050542235376, |
| "step": 3880 |
| }, |
| { |
| "epoch": 0.389, |
| "grad_norm": 33.75, |
| "grad_norm_var": 25.229622395833335, |
| "learning_rate": 0.0001, |
| "loss": 8.428, |
| "loss/crossentropy": 2.2669249922037125, |
| "loss/hidden": 3.589453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22769966274499892, |
| "step": 3890 |
| }, |
| { |
| "epoch": 0.39, |
| "grad_norm": 34.75, |
| "grad_norm_var": 22.89765625, |
| "learning_rate": 0.0001, |
| "loss": 8.4932, |
| "loss/crossentropy": 2.2599128648638724, |
| "loss/hidden": 3.744921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24268860407173634, |
| "step": 3900 |
| }, |
| { |
| "epoch": 0.391, |
| "grad_norm": 37.0, |
| "grad_norm_var": 44.83483072916667, |
| "learning_rate": 0.0001, |
| "loss": 8.5202, |
| "loss/crossentropy": 2.1972982972860335, |
| "loss/hidden": 3.812109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25300098545849325, |
| "step": 3910 |
| }, |
| { |
| "epoch": 0.392, |
| "grad_norm": 40.25, |
| "grad_norm_var": 11.49140625, |
| "learning_rate": 0.0001, |
| "loss": 8.5002, |
| "loss/crossentropy": 2.347226142883301, |
| "loss/hidden": 3.658203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24455956518650054, |
| "step": 3920 |
| }, |
| { |
| "epoch": 0.393, |
| "grad_norm": 31.125, |
| "grad_norm_var": 17.219791666666666, |
| "learning_rate": 0.0001, |
| "loss": 8.4054, |
| "loss/crossentropy": 2.2351839393377304, |
| "loss/hidden": 3.59140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22245508581399917, |
| "step": 3930 |
| }, |
| { |
| "epoch": 0.394, |
| "grad_norm": 37.0, |
| "grad_norm_var": 11.382747395833333, |
| "learning_rate": 0.0001, |
| "loss": 8.4771, |
| "loss/crossentropy": 2.4160192787647246, |
| "loss/hidden": 3.700390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2585303969681263, |
| "step": 3940 |
| }, |
| { |
| "epoch": 0.395, |
| "grad_norm": 30.875, |
| "grad_norm_var": 10.731705729166666, |
| "learning_rate": 0.0001, |
| "loss": 8.4064, |
| "loss/crossentropy": 2.329276342689991, |
| "loss/hidden": 3.641796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23809341453015803, |
| "step": 3950 |
| }, |
| { |
| "epoch": 0.396, |
| "grad_norm": 31.875, |
| "grad_norm_var": 18.456184895833335, |
| "learning_rate": 0.0001, |
| "loss": 8.4948, |
| "loss/crossentropy": 2.397703355550766, |
| "loss/hidden": 3.8703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2577156092971563, |
| "step": 3960 |
| }, |
| { |
| "epoch": 0.397, |
| "grad_norm": 35.25, |
| "grad_norm_var": 16.1291015625, |
| "learning_rate": 0.0001, |
| "loss": 8.5021, |
| "loss/crossentropy": 2.3261855766177177, |
| "loss/hidden": 3.694921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2523797513917089, |
| "step": 3970 |
| }, |
| { |
| "epoch": 0.398, |
| "grad_norm": 33.0, |
| "grad_norm_var": 8.205208333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.4531, |
| "loss/crossentropy": 2.235724928975105, |
| "loss/hidden": 3.7296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2461556438356638, |
| "step": 3980 |
| }, |
| { |
| "epoch": 0.399, |
| "grad_norm": 35.5, |
| "grad_norm_var": 7.25390625, |
| "learning_rate": 0.0001, |
| "loss": 8.3779, |
| "loss/crossentropy": 2.258603112399578, |
| "loss/hidden": 3.731640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2523756165057421, |
| "step": 3990 |
| }, |
| { |
| "epoch": 0.4, |
| "grad_norm": 32.5, |
| "grad_norm_var": 11.34765625, |
| "learning_rate": 0.0001, |
| "loss": 8.542, |
| "loss/crossentropy": 2.2897588342428206, |
| "loss/hidden": 3.6453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2516363400965929, |
| "step": 4000 |
| }, |
| { |
| "epoch": 0.401, |
| "grad_norm": 32.0, |
| "grad_norm_var": 9.889518229166667, |
| "learning_rate": 0.0001, |
| "loss": 8.4234, |
| "loss/crossentropy": 2.1721622362732886, |
| "loss/hidden": 3.74609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25033344645053146, |
| "step": 4010 |
| }, |
| { |
| "epoch": 0.402, |
| "grad_norm": 35.75, |
| "grad_norm_var": 15.199934895833334, |
| "learning_rate": 0.0001, |
| "loss": 8.4018, |
| "loss/crossentropy": 2.1795839801430703, |
| "loss/hidden": 3.78515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24097833968698978, |
| "step": 4020 |
| }, |
| { |
| "epoch": 0.403, |
| "grad_norm": 37.75, |
| "grad_norm_var": 162.76451822916667, |
| "learning_rate": 0.0001, |
| "loss": 8.4738, |
| "loss/crossentropy": 2.4760424941778183, |
| "loss/hidden": 3.86484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.3052004296332598, |
| "step": 4030 |
| }, |
| { |
| "epoch": 0.404, |
| "grad_norm": 41.5, |
| "grad_norm_var": 2.408370239548424e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.4302, |
| "loss/crossentropy": 2.1855442106723784, |
| "loss/hidden": 3.77578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24037305619567634, |
| "step": 4040 |
| }, |
| { |
| "epoch": 0.405, |
| "grad_norm": 32.0, |
| "grad_norm_var": 2.4083702412167086e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.3213, |
| "loss/crossentropy": 2.22409378439188, |
| "loss/hidden": 3.653515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23280739206820728, |
| "step": 4050 |
| }, |
| { |
| "epoch": 0.406, |
| "grad_norm": 39.5, |
| "grad_norm_var": 9.825, |
| "learning_rate": 0.0001, |
| "loss": 8.4657, |
| "loss/crossentropy": 2.28346493691206, |
| "loss/hidden": 3.773828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2542352583259344, |
| "step": 4060 |
| }, |
| { |
| "epoch": 0.407, |
| "grad_norm": 39.75, |
| "grad_norm_var": 7.640625, |
| "learning_rate": 0.0001, |
| "loss": 8.3937, |
| "loss/crossentropy": 2.2639666229486464, |
| "loss/hidden": 3.851953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27007580138742926, |
| "step": 4070 |
| }, |
| { |
| "epoch": 0.408, |
| "grad_norm": 37.25, |
| "grad_norm_var": 10.493684895833333, |
| "learning_rate": 0.0001, |
| "loss": 8.468, |
| "loss/crossentropy": 2.356726923584938, |
| "loss/hidden": 3.716796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.242316972091794, |
| "step": 4080 |
| }, |
| { |
| "epoch": 0.409, |
| "grad_norm": 34.5, |
| "grad_norm_var": 8.8462890625, |
| "learning_rate": 0.0001, |
| "loss": 8.3684, |
| "loss/crossentropy": 2.151221239566803, |
| "loss/hidden": 3.692578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2382324907928705, |
| "step": 4090 |
| }, |
| { |
| "epoch": 0.41, |
| "grad_norm": 37.0, |
| "grad_norm_var": 13.3775390625, |
| "learning_rate": 0.0001, |
| "loss": 8.296, |
| "loss/crossentropy": 2.1645509719848635, |
| "loss/hidden": 3.65703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2371783286333084, |
| "step": 4100 |
| }, |
| { |
| "epoch": 0.411, |
| "grad_norm": 32.5, |
| "grad_norm_var": 25.270247395833334, |
| "learning_rate": 0.0001, |
| "loss": 8.3435, |
| "loss/crossentropy": 2.1995768398046494, |
| "loss/hidden": 3.6640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23499403558671475, |
| "step": 4110 |
| }, |
| { |
| "epoch": 0.412, |
| "grad_norm": 38.0, |
| "grad_norm_var": 13.875, |
| "learning_rate": 0.0001, |
| "loss": 8.3746, |
| "loss/crossentropy": 2.135378623008728, |
| "loss/hidden": 3.612109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24154506418854, |
| "step": 4120 |
| }, |
| { |
| "epoch": 0.413, |
| "grad_norm": 40.25, |
| "grad_norm_var": 12.233072916666666, |
| "learning_rate": 0.0001, |
| "loss": 8.3505, |
| "loss/crossentropy": 2.356252074241638, |
| "loss/hidden": 3.734765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2688302733004093, |
| "step": 4130 |
| }, |
| { |
| "epoch": 0.414, |
| "grad_norm": 39.25, |
| "grad_norm_var": 14.333072916666667, |
| "learning_rate": 0.0001, |
| "loss": 8.4228, |
| "loss/crossentropy": 2.334370291233063, |
| "loss/hidden": 3.648046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23012813031673432, |
| "step": 4140 |
| }, |
| { |
| "epoch": 0.415, |
| "grad_norm": 35.5, |
| "grad_norm_var": 8.170833333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.3324, |
| "loss/crossentropy": 2.214457754790783, |
| "loss/hidden": 3.651171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24744220934808253, |
| "step": 4150 |
| }, |
| { |
| "epoch": 0.416, |
| "grad_norm": 37.0, |
| "grad_norm_var": 7.603580729166667, |
| "learning_rate": 0.0001, |
| "loss": 8.3237, |
| "loss/crossentropy": 2.3053094416856768, |
| "loss/hidden": 3.62578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2290981512516737, |
| "step": 4160 |
| }, |
| { |
| "epoch": 0.417, |
| "grad_norm": 50.75, |
| "grad_norm_var": 23.6931640625, |
| "learning_rate": 0.0001, |
| "loss": 8.2895, |
| "loss/crossentropy": 2.100640784204006, |
| "loss/hidden": 3.72578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24771791882812977, |
| "step": 4170 |
| }, |
| { |
| "epoch": 0.418, |
| "grad_norm": 42.25, |
| "grad_norm_var": 25.6509765625, |
| "learning_rate": 0.0001, |
| "loss": 8.4997, |
| "loss/crossentropy": 2.4176384449005126, |
| "loss/hidden": 3.734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2665034931153059, |
| "step": 4180 |
| }, |
| { |
| "epoch": 0.419, |
| "grad_norm": 46.75, |
| "grad_norm_var": 16.7041015625, |
| "learning_rate": 0.0001, |
| "loss": 8.241, |
| "loss/crossentropy": 2.0916571110486983, |
| "loss/hidden": 3.640234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23109357040375472, |
| "step": 4190 |
| }, |
| { |
| "epoch": 0.42, |
| "grad_norm": 34.0, |
| "grad_norm_var": 10.265625, |
| "learning_rate": 0.0001, |
| "loss": 8.3903, |
| "loss/crossentropy": 2.4284214213490487, |
| "loss/hidden": 3.676171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24520040042698382, |
| "step": 4200 |
| }, |
| { |
| "epoch": 0.421, |
| "grad_norm": 48.25, |
| "grad_norm_var": 17.694205729166665, |
| "learning_rate": 0.0001, |
| "loss": 8.4379, |
| "loss/crossentropy": 2.342070159316063, |
| "loss/hidden": 3.750390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2608378600329161, |
| "step": 4210 |
| }, |
| { |
| "epoch": 0.422, |
| "grad_norm": 31.875, |
| "grad_norm_var": 21.2806640625, |
| "learning_rate": 0.0001, |
| "loss": 8.2667, |
| "loss/crossentropy": 2.3339773267507553, |
| "loss/hidden": 3.68828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24873733669519424, |
| "step": 4220 |
| }, |
| { |
| "epoch": 0.423, |
| "grad_norm": 34.0, |
| "grad_norm_var": 10.834375, |
| "learning_rate": 0.0001, |
| "loss": 8.313, |
| "loss/crossentropy": 2.2396500378847124, |
| "loss/hidden": 3.647265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23453602455556394, |
| "step": 4230 |
| }, |
| { |
| "epoch": 0.424, |
| "grad_norm": 31.875, |
| "grad_norm_var": 20.712239583333332, |
| "learning_rate": 0.0001, |
| "loss": 8.3885, |
| "loss/crossentropy": 2.1176706120371818, |
| "loss/hidden": 3.818359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24183569326996804, |
| "step": 4240 |
| }, |
| { |
| "epoch": 0.425, |
| "grad_norm": 46.0, |
| "grad_norm_var": 18.212434895833333, |
| "learning_rate": 0.0001, |
| "loss": 8.4311, |
| "loss/crossentropy": 2.161303213238716, |
| "loss/hidden": 3.71015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23596356846392155, |
| "step": 4250 |
| }, |
| { |
| "epoch": 0.426, |
| "grad_norm": 31.75, |
| "grad_norm_var": 18.470833333333335, |
| "learning_rate": 0.0001, |
| "loss": 8.389, |
| "loss/crossentropy": 2.1959901452064514, |
| "loss/hidden": 3.683203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23274125456809996, |
| "step": 4260 |
| }, |
| { |
| "epoch": 0.427, |
| "grad_norm": 40.0, |
| "grad_norm_var": 15.642643229166667, |
| "learning_rate": 0.0001, |
| "loss": 8.3995, |
| "loss/crossentropy": 2.3957006752491, |
| "loss/hidden": 3.6546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23920847922563554, |
| "step": 4270 |
| }, |
| { |
| "epoch": 0.428, |
| "grad_norm": 32.25, |
| "grad_norm_var": 12.506705729166667, |
| "learning_rate": 0.0001, |
| "loss": 8.3247, |
| "loss/crossentropy": 2.140459132194519, |
| "loss/hidden": 3.776953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24535099379718303, |
| "step": 4280 |
| }, |
| { |
| "epoch": 0.429, |
| "grad_norm": 31.125, |
| "grad_norm_var": 14.913997395833333, |
| "learning_rate": 0.0001, |
| "loss": 8.2296, |
| "loss/crossentropy": 2.330910986661911, |
| "loss/hidden": 3.65546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25125612393021585, |
| "step": 4290 |
| }, |
| { |
| "epoch": 0.43, |
| "grad_norm": 31.0, |
| "grad_norm_var": 9.062955729166667, |
| "learning_rate": 0.0001, |
| "loss": 8.2789, |
| "loss/crossentropy": 2.21281051337719, |
| "loss/hidden": 3.717578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24373065643012523, |
| "step": 4300 |
| }, |
| { |
| "epoch": 0.431, |
| "grad_norm": 46.25, |
| "grad_norm_var": 18.794205729166666, |
| "learning_rate": 0.0001, |
| "loss": 8.283, |
| "loss/crossentropy": 2.182158187031746, |
| "loss/hidden": 3.6703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24544784277677537, |
| "step": 4310 |
| }, |
| { |
| "epoch": 0.432, |
| "grad_norm": 45.75, |
| "grad_norm_var": 38.16458333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.2852, |
| "loss/crossentropy": 2.365989252924919, |
| "loss/hidden": 3.706640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24662891514599322, |
| "step": 4320 |
| }, |
| { |
| "epoch": 0.433, |
| "grad_norm": 35.25, |
| "grad_norm_var": 16.307291666666668, |
| "learning_rate": 0.0001, |
| "loss": 8.2794, |
| "loss/crossentropy": 2.1546150177717207, |
| "loss/hidden": 3.620703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2297368910163641, |
| "step": 4330 |
| }, |
| { |
| "epoch": 0.434, |
| "grad_norm": 31.625, |
| "grad_norm_var": 19.6400390625, |
| "learning_rate": 0.0001, |
| "loss": 8.3956, |
| "loss/crossentropy": 2.385764144361019, |
| "loss/hidden": 3.7015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24324760176241397, |
| "step": 4340 |
| }, |
| { |
| "epoch": 0.435, |
| "grad_norm": 33.0, |
| "grad_norm_var": 9.987239583333333, |
| "learning_rate": 0.0001, |
| "loss": 8.2695, |
| "loss/crossentropy": 2.112358179688454, |
| "loss/hidden": 3.788671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24638627246022224, |
| "step": 4350 |
| }, |
| { |
| "epoch": 0.436, |
| "grad_norm": 34.5, |
| "grad_norm_var": 8.0875, |
| "learning_rate": 0.0001, |
| "loss": 8.448, |
| "loss/crossentropy": 2.182591002434492, |
| "loss/hidden": 3.746875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24270438468083738, |
| "step": 4360 |
| }, |
| { |
| "epoch": 0.437, |
| "grad_norm": 38.25, |
| "grad_norm_var": 5.695833333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.3975, |
| "loss/crossentropy": 2.401635229587555, |
| "loss/hidden": 3.65859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24400906264781952, |
| "step": 4370 |
| }, |
| { |
| "epoch": 0.438, |
| "grad_norm": 34.75, |
| "grad_norm_var": 8.372330729166666, |
| "learning_rate": 0.0001, |
| "loss": 8.3822, |
| "loss/crossentropy": 2.3247624695301057, |
| "loss/hidden": 3.591015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24138722717761993, |
| "step": 4380 |
| }, |
| { |
| "epoch": 0.439, |
| "grad_norm": 62.75, |
| "grad_norm_var": 70.6103515625, |
| "learning_rate": 0.0001, |
| "loss": 8.3893, |
| "loss/crossentropy": 2.1605212301015855, |
| "loss/hidden": 3.75, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.270247707888484, |
| "step": 4390 |
| }, |
| { |
| "epoch": 0.44, |
| "grad_norm": 34.5, |
| "grad_norm_var": 80.6375, |
| "learning_rate": 0.0001, |
| "loss": 8.3494, |
| "loss/crossentropy": 2.201382315158844, |
| "loss/hidden": 3.55390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22101359032094478, |
| "step": 4400 |
| }, |
| { |
| "epoch": 0.441, |
| "grad_norm": 37.75, |
| "grad_norm_var": 12.06875, |
| "learning_rate": 0.0001, |
| "loss": 8.3221, |
| "loss/crossentropy": 2.1608468025922773, |
| "loss/hidden": 3.570703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22636283356696368, |
| "step": 4410 |
| }, |
| { |
| "epoch": 0.442, |
| "grad_norm": 32.25, |
| "grad_norm_var": 16.13515625, |
| "learning_rate": 0.0001, |
| "loss": 8.2013, |
| "loss/crossentropy": 2.2836680516600607, |
| "loss/hidden": 3.6765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24737481120973825, |
| "step": 4420 |
| }, |
| { |
| "epoch": 0.443, |
| "grad_norm": 34.5, |
| "grad_norm_var": 14.570768229166667, |
| "learning_rate": 0.0001, |
| "loss": 8.3241, |
| "loss/crossentropy": 2.1778072111308573, |
| "loss/hidden": 3.685546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23579915445297955, |
| "step": 4430 |
| }, |
| { |
| "epoch": 0.444, |
| "grad_norm": 33.5, |
| "grad_norm_var": 14.844205729166667, |
| "learning_rate": 0.0001, |
| "loss": 8.2468, |
| "loss/crossentropy": 2.115637184679508, |
| "loss/hidden": 3.54921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22633790075778962, |
| "step": 4440 |
| }, |
| { |
| "epoch": 0.445, |
| "grad_norm": 40.0, |
| "grad_norm_var": 8.757291666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.3095, |
| "loss/crossentropy": 2.2457625687122347, |
| "loss/hidden": 3.678125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24750035293400288, |
| "step": 4450 |
| }, |
| { |
| "epoch": 0.446, |
| "grad_norm": 30.75, |
| "grad_norm_var": 9.359309895833333, |
| "learning_rate": 0.0001, |
| "loss": 8.3604, |
| "loss/crossentropy": 2.2244989693164827, |
| "loss/hidden": 3.742578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.27126055024564266, |
| "step": 4460 |
| }, |
| { |
| "epoch": 0.447, |
| "grad_norm": 32.75, |
| "grad_norm_var": 8.16015625, |
| "learning_rate": 0.0001, |
| "loss": 8.293, |
| "loss/crossentropy": 2.208540087938309, |
| "loss/hidden": 3.591796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23848242741078138, |
| "step": 4470 |
| }, |
| { |
| "epoch": 0.448, |
| "grad_norm": 31.5, |
| "grad_norm_var": 27.215625, |
| "learning_rate": 0.0001, |
| "loss": 8.4332, |
| "loss/crossentropy": 2.182788160443306, |
| "loss/hidden": 3.775, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26532087065279486, |
| "step": 4480 |
| }, |
| { |
| "epoch": 0.449, |
| "grad_norm": 39.5, |
| "grad_norm_var": 29.916666666666668, |
| "learning_rate": 0.0001, |
| "loss": 8.2552, |
| "loss/crossentropy": 2.172296644747257, |
| "loss/hidden": 3.66875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23516011722385882, |
| "step": 4490 |
| }, |
| { |
| "epoch": 0.45, |
| "grad_norm": 33.75, |
| "grad_norm_var": 21.775455729166666, |
| "learning_rate": 0.0001, |
| "loss": 8.3872, |
| "loss/crossentropy": 2.304448103904724, |
| "loss/hidden": 3.56015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23418739810585976, |
| "step": 4500 |
| }, |
| { |
| "epoch": 0.451, |
| "grad_norm": 31.75, |
| "grad_norm_var": 23.40390625, |
| "learning_rate": 0.0001, |
| "loss": 8.4055, |
| "loss/crossentropy": 2.2493974685668947, |
| "loss/hidden": 3.652734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2410556711256504, |
| "step": 4510 |
| }, |
| { |
| "epoch": 0.452, |
| "grad_norm": 32.25, |
| "grad_norm_var": 19.04375, |
| "learning_rate": 0.0001, |
| "loss": 8.2847, |
| "loss/crossentropy": 2.0756215125322344, |
| "loss/hidden": 3.658984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23735107891261578, |
| "step": 4520 |
| }, |
| { |
| "epoch": 0.453, |
| "grad_norm": 38.5, |
| "grad_norm_var": 11.048958333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.4058, |
| "loss/crossentropy": 2.2705332577228545, |
| "loss/hidden": 3.75703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2552911601960659, |
| "step": 4530 |
| }, |
| { |
| "epoch": 0.454, |
| "grad_norm": 32.25, |
| "grad_norm_var": 15.964583333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.3352, |
| "loss/crossentropy": 2.2062640622258187, |
| "loss/hidden": 3.663671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23420217223465442, |
| "step": 4540 |
| }, |
| { |
| "epoch": 0.455, |
| "grad_norm": 35.5, |
| "grad_norm_var": 18.8041015625, |
| "learning_rate": 0.0001, |
| "loss": 8.1398, |
| "loss/crossentropy": 2.2153613708913324, |
| "loss/hidden": 3.688671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23869724282994867, |
| "step": 4550 |
| }, |
| { |
| "epoch": 0.456, |
| "grad_norm": 32.25, |
| "grad_norm_var": 15.843489583333334, |
| "learning_rate": 0.0001, |
| "loss": 8.3482, |
| "loss/crossentropy": 2.2614838272333144, |
| "loss/hidden": 3.719921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2509554075077176, |
| "step": 4560 |
| }, |
| { |
| "epoch": 0.457, |
| "grad_norm": 37.0, |
| "grad_norm_var": 17.045572916666668, |
| "learning_rate": 0.0001, |
| "loss": 8.3724, |
| "loss/crossentropy": 2.1315455704927446, |
| "loss/hidden": 3.720703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24938025698065758, |
| "step": 4570 |
| }, |
| { |
| "epoch": 0.458, |
| "grad_norm": 31.75, |
| "grad_norm_var": 18.753580729166668, |
| "learning_rate": 0.0001, |
| "loss": 8.1789, |
| "loss/crossentropy": 2.250936383008957, |
| "loss/hidden": 3.601953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23103775745257735, |
| "step": 4580 |
| }, |
| { |
| "epoch": 0.459, |
| "grad_norm": 31.75, |
| "grad_norm_var": 9.3087890625, |
| "learning_rate": 0.0001, |
| "loss": 8.2367, |
| "loss/crossentropy": 2.0491979137063026, |
| "loss/hidden": 3.767578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24958254247903824, |
| "step": 4590 |
| }, |
| { |
| "epoch": 0.46, |
| "grad_norm": 32.75, |
| "grad_norm_var": 6.034309895833333, |
| "learning_rate": 0.0001, |
| "loss": 8.2813, |
| "loss/crossentropy": 2.1869849786162376, |
| "loss/hidden": 3.678125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2445020995102823, |
| "step": 4600 |
| }, |
| { |
| "epoch": 0.461, |
| "grad_norm": 33.25, |
| "grad_norm_var": 16.960416666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.2774, |
| "loss/crossentropy": 2.3401444420218467, |
| "loss/hidden": 3.644140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2346777945756912, |
| "step": 4610 |
| }, |
| { |
| "epoch": 0.462, |
| "grad_norm": 31.875, |
| "grad_norm_var": 20.257747395833334, |
| "learning_rate": 0.0001, |
| "loss": 8.261, |
| "loss/crossentropy": 2.2197701543569566, |
| "loss/hidden": 3.671484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24417277611792088, |
| "step": 4620 |
| }, |
| { |
| "epoch": 0.463, |
| "grad_norm": 39.75, |
| "grad_norm_var": 21.111458333333335, |
| "learning_rate": 0.0001, |
| "loss": 8.1879, |
| "loss/crossentropy": 2.2979017451405523, |
| "loss/hidden": 3.584765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24459463655948638, |
| "step": 4630 |
| }, |
| { |
| "epoch": 0.464, |
| "grad_norm": 37.75, |
| "grad_norm_var": 18.424739583333334, |
| "learning_rate": 0.0001, |
| "loss": 8.2222, |
| "loss/crossentropy": 2.1899428203701974, |
| "loss/hidden": 3.63828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23625375218689443, |
| "step": 4640 |
| }, |
| { |
| "epoch": 0.465, |
| "grad_norm": 33.0, |
| "grad_norm_var": 5.676822916666667, |
| "learning_rate": 0.0001, |
| "loss": 8.2134, |
| "loss/crossentropy": 2.173795387148857, |
| "loss/hidden": 3.653515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21926050689071416, |
| "step": 4650 |
| }, |
| { |
| "epoch": 0.466, |
| "grad_norm": 32.75, |
| "grad_norm_var": 5.024739583333333, |
| "learning_rate": 0.0001, |
| "loss": 8.2134, |
| "loss/crossentropy": 2.2807129830121995, |
| "loss/hidden": 3.5890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22838456649333239, |
| "step": 4660 |
| }, |
| { |
| "epoch": 0.467, |
| "grad_norm": 32.75, |
| "grad_norm_var": 7.588541666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.2508, |
| "loss/crossentropy": 2.19968124628067, |
| "loss/hidden": 3.590625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23003219701349736, |
| "step": 4670 |
| }, |
| { |
| "epoch": 0.468, |
| "grad_norm": 32.25, |
| "grad_norm_var": 8.268489583333333, |
| "learning_rate": 0.0001, |
| "loss": 8.2723, |
| "loss/crossentropy": 2.3073908984661102, |
| "loss/hidden": 3.621484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23195548579096795, |
| "step": 4680 |
| }, |
| { |
| "epoch": 0.469, |
| "grad_norm": 34.25, |
| "grad_norm_var": 7.398372395833333, |
| "learning_rate": 0.0001, |
| "loss": 8.3115, |
| "loss/crossentropy": 2.251572087407112, |
| "loss/hidden": 3.615234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24007561076432465, |
| "step": 4690 |
| }, |
| { |
| "epoch": 0.47, |
| "grad_norm": 37.25, |
| "grad_norm_var": 12.067708333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.215, |
| "loss/crossentropy": 2.179315264523029, |
| "loss/hidden": 3.614453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23048642594367266, |
| "step": 4700 |
| }, |
| { |
| "epoch": 0.471, |
| "grad_norm": 33.5, |
| "grad_norm_var": 9.384830729166667, |
| "learning_rate": 0.0001, |
| "loss": 8.4657, |
| "loss/crossentropy": 2.304041627049446, |
| "loss/hidden": 3.705859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2532807156443596, |
| "step": 4710 |
| }, |
| { |
| "epoch": 0.472, |
| "grad_norm": 34.75, |
| "grad_norm_var": 2.5403116373864177e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.3845, |
| "loss/crossentropy": 2.28207755535841, |
| "loss/hidden": 3.6125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24173217974603176, |
| "step": 4720 |
| }, |
| { |
| "epoch": 0.473, |
| "grad_norm": 31.375, |
| "grad_norm_var": 2.8580729166666665, |
| "learning_rate": 0.0001, |
| "loss": 8.1924, |
| "loss/crossentropy": 2.317093315720558, |
| "loss/hidden": 3.50859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22850329093635083, |
| "step": 4730 |
| }, |
| { |
| "epoch": 0.474, |
| "grad_norm": 38.5, |
| "grad_norm_var": 55.86354166666667, |
| "learning_rate": 0.0001, |
| "loss": 8.205, |
| "loss/crossentropy": 2.2346624046564103, |
| "loss/hidden": 3.76015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2535475058481097, |
| "step": 4740 |
| }, |
| { |
| "epoch": 0.475, |
| "grad_norm": 32.75, |
| "grad_norm_var": 6.105989583333334, |
| "learning_rate": 0.0001, |
| "loss": 8.2004, |
| "loss/crossentropy": 2.205992843210697, |
| "loss/hidden": 3.5, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2157002430409193, |
| "step": 4750 |
| }, |
| { |
| "epoch": 0.476, |
| "grad_norm": 42.5, |
| "grad_norm_var": 7.551041666666666, |
| "learning_rate": 0.0001, |
| "loss": 8.2493, |
| "loss/crossentropy": 2.2944780766963957, |
| "loss/hidden": 3.553515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2302501540631056, |
| "step": 4760 |
| }, |
| { |
| "epoch": 0.477, |
| "grad_norm": 34.5, |
| "grad_norm_var": 54.297916666666666, |
| "learning_rate": 0.0001, |
| "loss": 8.2422, |
| "loss/crossentropy": 2.349091801047325, |
| "loss/hidden": 3.726953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25370817482471464, |
| "step": 4770 |
| }, |
| { |
| "epoch": 0.478, |
| "grad_norm": 35.25, |
| "grad_norm_var": 48.0119140625, |
| "learning_rate": 0.0001, |
| "loss": 8.1697, |
| "loss/crossentropy": 2.419427090883255, |
| "loss/hidden": 3.5984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23952311277389526, |
| "step": 4780 |
| }, |
| { |
| "epoch": 0.479, |
| "grad_norm": 44.0, |
| "grad_norm_var": 34.6509765625, |
| "learning_rate": 0.0001, |
| "loss": 8.2147, |
| "loss/crossentropy": 2.2663041442632674, |
| "loss/hidden": 3.512890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22062067724764348, |
| "step": 4790 |
| }, |
| { |
| "epoch": 0.48, |
| "grad_norm": 30.375, |
| "grad_norm_var": 38.708333333333336, |
| "learning_rate": 0.0001, |
| "loss": 8.1478, |
| "loss/crossentropy": 2.3492169111967085, |
| "loss/hidden": 3.63359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23971957936882973, |
| "step": 4800 |
| }, |
| { |
| "epoch": 0.481, |
| "grad_norm": 36.75, |
| "grad_norm_var": 9.3056640625, |
| "learning_rate": 0.0001, |
| "loss": 8.165, |
| "loss/crossentropy": 2.232720893621445, |
| "loss/hidden": 3.580078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22206582501530647, |
| "step": 4810 |
| }, |
| { |
| "epoch": 0.482, |
| "grad_norm": 34.25, |
| "grad_norm_var": 6.8197265625, |
| "learning_rate": 0.0001, |
| "loss": 8.1216, |
| "loss/crossentropy": 2.1783568069338797, |
| "loss/hidden": 3.4515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21557580903172494, |
| "step": 4820 |
| }, |
| { |
| "epoch": 0.483, |
| "grad_norm": 31.5, |
| "grad_norm_var": 6.039322916666666, |
| "learning_rate": 0.0001, |
| "loss": 8.2857, |
| "loss/crossentropy": 2.3961785644292832, |
| "loss/hidden": 3.640234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.257023797929287, |
| "step": 4830 |
| }, |
| { |
| "epoch": 0.484, |
| "grad_norm": 33.0, |
| "grad_norm_var": 12.29375, |
| "learning_rate": 0.0001, |
| "loss": 8.1802, |
| "loss/crossentropy": 2.310921123623848, |
| "loss/hidden": 3.526953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23252013735473157, |
| "step": 4840 |
| }, |
| { |
| "epoch": 0.485, |
| "grad_norm": 33.25, |
| "grad_norm_var": 23.69140625, |
| "learning_rate": 0.0001, |
| "loss": 8.2647, |
| "loss/crossentropy": 2.152234472334385, |
| "loss/hidden": 3.6578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2241989640519023, |
| "step": 4850 |
| }, |
| { |
| "epoch": 0.486, |
| "grad_norm": 32.75, |
| "grad_norm_var": 26.65390625, |
| "learning_rate": 0.0001, |
| "loss": 8.2819, |
| "loss/crossentropy": 2.1197937928140163, |
| "loss/hidden": 3.702734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24686675220727922, |
| "step": 4860 |
| }, |
| { |
| "epoch": 0.487, |
| "grad_norm": 37.25, |
| "grad_norm_var": 13.0228515625, |
| "learning_rate": 0.0001, |
| "loss": 8.3156, |
| "loss/crossentropy": 2.345510223507881, |
| "loss/hidden": 3.571484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22903156131505967, |
| "step": 4870 |
| }, |
| { |
| "epoch": 0.488, |
| "grad_norm": 34.5, |
| "grad_norm_var": 6.5337890625, |
| "learning_rate": 0.0001, |
| "loss": 8.1946, |
| "loss/crossentropy": 2.1908657550811768, |
| "loss/hidden": 3.682421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23578502163290976, |
| "step": 4880 |
| }, |
| { |
| "epoch": 0.489, |
| "grad_norm": 33.75, |
| "grad_norm_var": 24.545572916666668, |
| "learning_rate": 0.0001, |
| "loss": 8.1165, |
| "loss/crossentropy": 2.180183355510235, |
| "loss/hidden": 3.661328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23667961843311786, |
| "step": 4890 |
| }, |
| { |
| "epoch": 0.49, |
| "grad_norm": 32.75, |
| "grad_norm_var": 58.805989583333336, |
| "learning_rate": 0.0001, |
| "loss": 8.127, |
| "loss/crossentropy": 2.206071509420872, |
| "loss/hidden": 3.51796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21433540284633637, |
| "step": 4900 |
| }, |
| { |
| "epoch": 0.491, |
| "grad_norm": 38.0, |
| "grad_norm_var": 50.01223958333333, |
| "learning_rate": 0.0001, |
| "loss": 8.1544, |
| "loss/crossentropy": 2.1096328511834144, |
| "loss/hidden": 3.61484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24193457532674073, |
| "step": 4910 |
| }, |
| { |
| "epoch": 0.492, |
| "grad_norm": 39.25, |
| "grad_norm_var": 17.08125, |
| "learning_rate": 0.0001, |
| "loss": 8.202, |
| "loss/crossentropy": 2.2372330710291863, |
| "loss/hidden": 3.63203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23599924352020024, |
| "step": 4920 |
| }, |
| { |
| "epoch": 0.493, |
| "grad_norm": 33.25, |
| "grad_norm_var": 12.742643229166667, |
| "learning_rate": 0.0001, |
| "loss": 8.1759, |
| "loss/crossentropy": 2.196950948238373, |
| "loss/hidden": 3.638671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24246960394084455, |
| "step": 4930 |
| }, |
| { |
| "epoch": 0.494, |
| "grad_norm": 40.5, |
| "grad_norm_var": 6.883072916666666, |
| "learning_rate": 0.0001, |
| "loss": 8.1278, |
| "loss/crossentropy": 2.3560511782765388, |
| "loss/hidden": 3.633984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2382162045687437, |
| "step": 4940 |
| }, |
| { |
| "epoch": 0.495, |
| "grad_norm": 35.0, |
| "grad_norm_var": 11.6603515625, |
| "learning_rate": 0.0001, |
| "loss": 8.2982, |
| "loss/crossentropy": 2.3035822331905367, |
| "loss/hidden": 3.64140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2485156562179327, |
| "step": 4950 |
| }, |
| { |
| "epoch": 0.496, |
| "grad_norm": 34.25, |
| "grad_norm_var": 14.192708333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.1579, |
| "loss/crossentropy": 2.184766189754009, |
| "loss/hidden": 3.637109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23721186630427837, |
| "step": 4960 |
| }, |
| { |
| "epoch": 0.497, |
| "grad_norm": 33.5, |
| "grad_norm_var": 13.871809895833334, |
| "learning_rate": 0.0001, |
| "loss": 8.1975, |
| "loss/crossentropy": 2.1983452700078487, |
| "loss/hidden": 3.697265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24081590361893176, |
| "step": 4970 |
| }, |
| { |
| "epoch": 0.498, |
| "grad_norm": 43.25, |
| "grad_norm_var": 13.948372395833333, |
| "learning_rate": 0.0001, |
| "loss": 8.2262, |
| "loss/crossentropy": 2.3249034196138383, |
| "loss/hidden": 3.56875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.234759721159935, |
| "step": 4980 |
| }, |
| { |
| "epoch": 0.499, |
| "grad_norm": 31.5, |
| "grad_norm_var": 11.167708333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.1488, |
| "loss/crossentropy": 2.2298896074295045, |
| "loss/hidden": 3.6546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2505023546516895, |
| "step": 4990 |
| }, |
| { |
| "epoch": 0.5, |
| "grad_norm": 38.75, |
| "grad_norm_var": 10.43125, |
| "learning_rate": 0.0001, |
| "loss": 8.1769, |
| "loss/crossentropy": 2.201507803052664, |
| "loss/hidden": 3.520703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21926793903112413, |
| "step": 5000 |
| }, |
| { |
| "epoch": 0.501, |
| "grad_norm": 44.75, |
| "grad_norm_var": 16.2853515625, |
| "learning_rate": 0.0001, |
| "loss": 8.039, |
| "loss/crossentropy": 2.270238833874464, |
| "loss/hidden": 3.613671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23682384472340345, |
| "step": 5010 |
| }, |
| { |
| "epoch": 0.502, |
| "grad_norm": 31.75, |
| "grad_norm_var": 14.38515625, |
| "learning_rate": 0.0001, |
| "loss": 8.1599, |
| "loss/crossentropy": 2.108400362730026, |
| "loss/hidden": 3.718359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2417622933164239, |
| "step": 5020 |
| }, |
| { |
| "epoch": 0.503, |
| "grad_norm": 33.0, |
| "grad_norm_var": 2.8989583333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.1698, |
| "loss/crossentropy": 2.3586475804448126, |
| "loss/hidden": 3.571875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23084877729415892, |
| "step": 5030 |
| }, |
| { |
| "epoch": 0.504, |
| "grad_norm": 32.5, |
| "grad_norm_var": 1.962513882217063e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.293, |
| "loss/crossentropy": 2.308009374141693, |
| "loss/hidden": 3.631640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25030199717730284, |
| "step": 5040 |
| }, |
| { |
| "epoch": 0.505, |
| "grad_norm": 34.25, |
| "grad_norm_var": 1.962513880139065e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.2269, |
| "loss/crossentropy": 2.2477807879447935, |
| "loss/hidden": 3.63046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22722288742661476, |
| "step": 5050 |
| }, |
| { |
| "epoch": 0.506, |
| "grad_norm": 31.0, |
| "grad_norm_var": 230.7962890625, |
| "learning_rate": 0.0001, |
| "loss": 8.0995, |
| "loss/crossentropy": 2.113031893968582, |
| "loss/hidden": 3.625390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2260434988886118, |
| "step": 5060 |
| }, |
| { |
| "epoch": 0.507, |
| "grad_norm": 29.5, |
| "grad_norm_var": 226.95462239583333, |
| "learning_rate": 0.0001, |
| "loss": 8.1254, |
| "loss/crossentropy": 2.1494341671466826, |
| "loss/hidden": 3.628125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2316014662384987, |
| "step": 5070 |
| }, |
| { |
| "epoch": 0.508, |
| "grad_norm": 42.25, |
| "grad_norm_var": 17.206184895833335, |
| "learning_rate": 0.0001, |
| "loss": 8.3222, |
| "loss/crossentropy": 2.409487584233284, |
| "loss/hidden": 3.51953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24138148501515388, |
| "step": 5080 |
| }, |
| { |
| "epoch": 0.509, |
| "grad_norm": 33.0, |
| "grad_norm_var": 15.231184895833334, |
| "learning_rate": 0.0001, |
| "loss": 8.0576, |
| "loss/crossentropy": 2.285913223773241, |
| "loss/hidden": 3.528515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2232502717524767, |
| "step": 5090 |
| }, |
| { |
| "epoch": 0.51, |
| "grad_norm": 33.5, |
| "grad_norm_var": 15.065559895833333, |
| "learning_rate": 0.0001, |
| "loss": 8.1747, |
| "loss/crossentropy": 2.180917738378048, |
| "loss/hidden": 3.56328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23142165634781123, |
| "step": 5100 |
| }, |
| { |
| "epoch": 0.511, |
| "grad_norm": 32.25, |
| "grad_norm_var": 13.1166015625, |
| "learning_rate": 0.0001, |
| "loss": 8.1417, |
| "loss/crossentropy": 2.226011593639851, |
| "loss/hidden": 3.506640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22545368764549495, |
| "step": 5110 |
| }, |
| { |
| "epoch": 0.512, |
| "grad_norm": 33.25, |
| "grad_norm_var": 4.7572265625, |
| "learning_rate": 0.0001, |
| "loss": 8.1568, |
| "loss/crossentropy": 2.2642487674951552, |
| "loss/hidden": 3.626953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24627051521092652, |
| "step": 5120 |
| }, |
| { |
| "epoch": 0.513, |
| "grad_norm": 29.375, |
| "grad_norm_var": 8.576822916666666, |
| "learning_rate": 0.0001, |
| "loss": 8.1412, |
| "loss/crossentropy": 2.1210575878620146, |
| "loss/hidden": 3.603125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22232303582131863, |
| "step": 5130 |
| }, |
| { |
| "epoch": 0.514, |
| "grad_norm": 36.25, |
| "grad_norm_var": 7.51015625, |
| "learning_rate": 0.0001, |
| "loss": 8.1105, |
| "loss/crossentropy": 2.338705539703369, |
| "loss/hidden": 3.61796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2322886861860752, |
| "step": 5140 |
| }, |
| { |
| "epoch": 0.515, |
| "grad_norm": 32.25, |
| "grad_norm_var": 9.846809895833333, |
| "learning_rate": 0.0001, |
| "loss": 8.1908, |
| "loss/crossentropy": 2.2111464768648146, |
| "loss/hidden": 3.72109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.26425624899566175, |
| "step": 5150 |
| }, |
| { |
| "epoch": 0.516, |
| "grad_norm": 28.875, |
| "grad_norm_var": 9.946809895833333, |
| "learning_rate": 0.0001, |
| "loss": 8.0375, |
| "loss/crossentropy": 2.176833947002888, |
| "loss/hidden": 3.54765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22021548971533775, |
| "step": 5160 |
| }, |
| { |
| "epoch": 0.517, |
| "grad_norm": 34.0, |
| "grad_norm_var": 4.706705729166667, |
| "learning_rate": 0.0001, |
| "loss": 8.1521, |
| "loss/crossentropy": 2.1654283188283445, |
| "loss/hidden": 3.48203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2115080550312996, |
| "step": 5170 |
| }, |
| { |
| "epoch": 0.518, |
| "grad_norm": 30.25, |
| "grad_norm_var": 5.541666666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.13, |
| "loss/crossentropy": 2.2458599150180816, |
| "loss/hidden": 3.596484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22666897978633643, |
| "step": 5180 |
| }, |
| { |
| "epoch": 0.519, |
| "grad_norm": 44.75, |
| "grad_norm_var": 14.142122395833333, |
| "learning_rate": 0.0001, |
| "loss": 8.145, |
| "loss/crossentropy": 2.317767137289047, |
| "loss/hidden": 3.5671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23820882234722376, |
| "step": 5190 |
| }, |
| { |
| "epoch": 0.52, |
| "grad_norm": 32.5, |
| "grad_norm_var": 23.218684895833334, |
| "learning_rate": 0.0001, |
| "loss": 8.1323, |
| "loss/crossentropy": 2.279913380742073, |
| "loss/hidden": 3.558984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24836393278092145, |
| "step": 5200 |
| }, |
| { |
| "epoch": 0.521, |
| "grad_norm": 31.875, |
| "grad_norm_var": 16.846875, |
| "learning_rate": 0.0001, |
| "loss": 8.2041, |
| "loss/crossentropy": 2.201883518695831, |
| "loss/hidden": 3.57890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22954922150820495, |
| "step": 5210 |
| }, |
| { |
| "epoch": 0.522, |
| "grad_norm": 34.25, |
| "grad_norm_var": 221.16223958333333, |
| "learning_rate": 0.0001, |
| "loss": 8.273, |
| "loss/crossentropy": 2.2926857471466064, |
| "loss/hidden": 3.541796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22256891019642353, |
| "step": 5220 |
| }, |
| { |
| "epoch": 0.523, |
| "grad_norm": 75.0, |
| "grad_norm_var": 233.03098958333334, |
| "learning_rate": 0.0001, |
| "loss": 8.1559, |
| "loss/crossentropy": 2.1607275292277337, |
| "loss/hidden": 3.65859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24259125851094723, |
| "step": 5230 |
| }, |
| { |
| "epoch": 0.524, |
| "grad_norm": 33.5, |
| "grad_norm_var": 170.5353515625, |
| "learning_rate": 0.0001, |
| "loss": 8.1314, |
| "loss/crossentropy": 2.2264937654137613, |
| "loss/hidden": 3.653125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23244266752153636, |
| "step": 5240 |
| }, |
| { |
| "epoch": 0.525, |
| "grad_norm": 34.0, |
| "grad_norm_var": 69.98515625, |
| "learning_rate": 0.0001, |
| "loss": 8.0757, |
| "loss/crossentropy": 2.263314816355705, |
| "loss/hidden": 3.6921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2497670866549015, |
| "step": 5250 |
| }, |
| { |
| "epoch": 0.526, |
| "grad_norm": 32.75, |
| "grad_norm_var": 15.91640625, |
| "learning_rate": 0.0001, |
| "loss": 8.1746, |
| "loss/crossentropy": 2.2392116367816923, |
| "loss/hidden": 3.659765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24461503997445105, |
| "step": 5260 |
| }, |
| { |
| "epoch": 0.527, |
| "grad_norm": 30.875, |
| "grad_norm_var": 118.5931640625, |
| "learning_rate": 0.0001, |
| "loss": 8.145, |
| "loss/crossentropy": 2.184428018331528, |
| "loss/hidden": 3.6265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24789600986987353, |
| "step": 5270 |
| }, |
| { |
| "epoch": 0.528, |
| "grad_norm": 40.5, |
| "grad_norm_var": 127.34791666666666, |
| "learning_rate": 0.0001, |
| "loss": 8.0442, |
| "loss/crossentropy": 2.1109130561351774, |
| "loss/hidden": 3.528125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21775138471275568, |
| "step": 5280 |
| }, |
| { |
| "epoch": 0.529, |
| "grad_norm": 34.5, |
| "grad_norm_var": 6.623958333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.1308, |
| "loss/crossentropy": 2.386467677354813, |
| "loss/hidden": 3.607421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24021831918507813, |
| "step": 5290 |
| }, |
| { |
| "epoch": 0.53, |
| "grad_norm": 34.25, |
| "grad_norm_var": 7.5666015625, |
| "learning_rate": 0.0001, |
| "loss": 8.1256, |
| "loss/crossentropy": 2.3109169751405716, |
| "loss/hidden": 3.630859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2501339312642813, |
| "step": 5300 |
| }, |
| { |
| "epoch": 0.531, |
| "grad_norm": 31.875, |
| "grad_norm_var": 12.195572916666666, |
| "learning_rate": 0.0001, |
| "loss": 8.1229, |
| "loss/crossentropy": 2.346800622344017, |
| "loss/hidden": 3.608203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2536924373358488, |
| "step": 5310 |
| }, |
| { |
| "epoch": 0.532, |
| "grad_norm": 31.875, |
| "grad_norm_var": 9.997330729166666, |
| "learning_rate": 0.0001, |
| "loss": 8.0104, |
| "loss/crossentropy": 2.1395985931158066, |
| "loss/hidden": 3.471875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21648342311382293, |
| "step": 5320 |
| }, |
| { |
| "epoch": 0.533, |
| "grad_norm": 32.25, |
| "grad_norm_var": 54.80358072916667, |
| "learning_rate": 0.0001, |
| "loss": 8.2808, |
| "loss/crossentropy": 2.072698312997818, |
| "loss/hidden": 3.70625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21971321273595096, |
| "step": 5330 |
| }, |
| { |
| "epoch": 0.534, |
| "grad_norm": 37.75, |
| "grad_norm_var": 582.04140625, |
| "learning_rate": 0.0001, |
| "loss": 8.4396, |
| "loss/crossentropy": 2.168602865189314, |
| "loss/hidden": 3.68828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22807842567563058, |
| "step": 5340 |
| }, |
| { |
| "epoch": 0.535, |
| "grad_norm": 36.0, |
| "grad_norm_var": 17.0619140625, |
| "learning_rate": 0.0001, |
| "loss": 8.2354, |
| "loss/crossentropy": 2.212322035431862, |
| "loss/hidden": 3.579296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21836955063045024, |
| "step": 5350 |
| }, |
| { |
| "epoch": 0.536, |
| "grad_norm": 37.75, |
| "grad_norm_var": 18.780989583333334, |
| "learning_rate": 0.0001, |
| "loss": 8.2718, |
| "loss/crossentropy": 2.1710034780204297, |
| "loss/hidden": 3.656640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2387496206909418, |
| "step": 5360 |
| }, |
| { |
| "epoch": 0.537, |
| "grad_norm": 29.125, |
| "grad_norm_var": 27.276041666666668, |
| "learning_rate": 0.0001, |
| "loss": 8.2691, |
| "loss/crossentropy": 2.097558119148016, |
| "loss/hidden": 3.680078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2329912935383618, |
| "step": 5370 |
| }, |
| { |
| "epoch": 0.538, |
| "grad_norm": 37.5, |
| "grad_norm_var": 18.0181640625, |
| "learning_rate": 0.0001, |
| "loss": 8.057, |
| "loss/crossentropy": 2.275981144607067, |
| "loss/hidden": 3.55703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22314830794930457, |
| "step": 5380 |
| }, |
| { |
| "epoch": 0.539, |
| "grad_norm": 34.25, |
| "grad_norm_var": 19.930208333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.182, |
| "loss/crossentropy": 2.184271165728569, |
| "loss/hidden": 3.6609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23054859917610884, |
| "step": 5390 |
| }, |
| { |
| "epoch": 0.54, |
| "grad_norm": 34.0, |
| "grad_norm_var": 17.333072916666666, |
| "learning_rate": 0.0001, |
| "loss": 8.3439, |
| "loss/crossentropy": 2.480144701898098, |
| "loss/hidden": 3.701171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2607876468449831, |
| "step": 5400 |
| }, |
| { |
| "epoch": 0.541, |
| "grad_norm": 33.75, |
| "grad_norm_var": 14.249739583333334, |
| "learning_rate": 0.0001, |
| "loss": 8.2028, |
| "loss/crossentropy": 2.2421235501766206, |
| "loss/hidden": 3.637890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23026540800929068, |
| "step": 5410 |
| }, |
| { |
| "epoch": 0.542, |
| "grad_norm": 33.0, |
| "grad_norm_var": 14.408072916666667, |
| "learning_rate": 0.0001, |
| "loss": 8.122, |
| "loss/crossentropy": 2.173307144641876, |
| "loss/hidden": 3.64453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22846251353621483, |
| "step": 5420 |
| }, |
| { |
| "epoch": 0.543, |
| "grad_norm": 33.0, |
| "grad_norm_var": 5.3, |
| "learning_rate": 0.0001, |
| "loss": 8.2194, |
| "loss/crossentropy": 2.3099235713481905, |
| "loss/hidden": 3.629296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23289041519165038, |
| "step": 5430 |
| }, |
| { |
| "epoch": 0.544, |
| "grad_norm": 36.0, |
| "grad_norm_var": 28.040625, |
| "learning_rate": 0.0001, |
| "loss": 8.1837, |
| "loss/crossentropy": 2.2322505958378316, |
| "loss/hidden": 3.643359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2281514243222773, |
| "step": 5440 |
| }, |
| { |
| "epoch": 0.545, |
| "grad_norm": 30.0, |
| "grad_norm_var": 69.69166666666666, |
| "learning_rate": 0.0001, |
| "loss": 8.1516, |
| "loss/crossentropy": 2.199453258514404, |
| "loss/hidden": 3.5890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22161313518881798, |
| "step": 5450 |
| }, |
| { |
| "epoch": 0.546, |
| "grad_norm": 36.25, |
| "grad_norm_var": 88.72890625, |
| "learning_rate": 0.0001, |
| "loss": 8.305, |
| "loss/crossentropy": 2.3247879207134248, |
| "loss/hidden": 3.74609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2331052988767624, |
| "step": 5460 |
| }, |
| { |
| "epoch": 0.547, |
| "grad_norm": 50.25, |
| "grad_norm_var": 49.67265625, |
| "learning_rate": 0.0001, |
| "loss": 8.1121, |
| "loss/crossentropy": 2.2907343961298468, |
| "loss/hidden": 3.598828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22796698454767467, |
| "step": 5470 |
| }, |
| { |
| "epoch": 0.548, |
| "grad_norm": 34.5, |
| "grad_norm_var": 27.73125, |
| "learning_rate": 0.0001, |
| "loss": 8.1348, |
| "loss/crossentropy": 2.4852760285139084, |
| "loss/hidden": 3.5296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23372339643537998, |
| "step": 5480 |
| }, |
| { |
| "epoch": 0.549, |
| "grad_norm": 33.0, |
| "grad_norm_var": 2.703285648974309e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.0635, |
| "loss/crossentropy": 2.3434904247522352, |
| "loss/hidden": 3.725390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21989205628633499, |
| "step": 5490 |
| }, |
| { |
| "epoch": 0.55, |
| "grad_norm": 30.5, |
| "grad_norm_var": 6.481184895833334, |
| "learning_rate": 0.0001, |
| "loss": 8.1213, |
| "loss/crossentropy": 2.048020973801613, |
| "loss/hidden": 3.53046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21088404105976225, |
| "step": 5500 |
| }, |
| { |
| "epoch": 0.551, |
| "grad_norm": 41.0, |
| "grad_norm_var": 16.025, |
| "learning_rate": 0.0001, |
| "loss": 8.143, |
| "loss/crossentropy": 2.246925861388445, |
| "loss/hidden": 3.616796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2380803508684039, |
| "step": 5510 |
| }, |
| { |
| "epoch": 0.552, |
| "grad_norm": 34.0, |
| "grad_norm_var": 19.940625, |
| "learning_rate": 0.0001, |
| "loss": 8.1266, |
| "loss/crossentropy": 2.306339371204376, |
| "loss/hidden": 3.707421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2542409796267748, |
| "step": 5520 |
| }, |
| { |
| "epoch": 0.553, |
| "grad_norm": 30.25, |
| "grad_norm_var": 25.34375, |
| "learning_rate": 0.0001, |
| "loss": 8.1711, |
| "loss/crossentropy": 2.172018714249134, |
| "loss/hidden": 3.602734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24370079562067987, |
| "step": 5530 |
| }, |
| { |
| "epoch": 0.554, |
| "grad_norm": 42.25, |
| "grad_norm_var": 30.013541666666665, |
| "learning_rate": 0.0001, |
| "loss": 8.1492, |
| "loss/crossentropy": 2.22998360991478, |
| "loss/hidden": 3.625390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23992773257195948, |
| "step": 5540 |
| }, |
| { |
| "epoch": 0.555, |
| "grad_norm": 32.25, |
| "grad_norm_var": 28.613541666666666, |
| "learning_rate": 0.0001, |
| "loss": 8.0695, |
| "loss/crossentropy": 2.2180883288383484, |
| "loss/hidden": 3.598828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22950777132064104, |
| "step": 5550 |
| }, |
| { |
| "epoch": 0.556, |
| "grad_norm": 33.0, |
| "grad_norm_var": 34.692708333333336, |
| "learning_rate": 0.0001, |
| "loss": 8.1162, |
| "loss/crossentropy": 2.174109402298927, |
| "loss/hidden": 3.702734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24184305276721715, |
| "step": 5560 |
| }, |
| { |
| "epoch": 0.557, |
| "grad_norm": 34.0, |
| "grad_norm_var": 7.314583333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.0114, |
| "loss/crossentropy": 2.1175956279039383, |
| "loss/hidden": 3.53828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21476623937487602, |
| "step": 5570 |
| }, |
| { |
| "epoch": 0.558, |
| "grad_norm": 35.25, |
| "grad_norm_var": 19.8119140625, |
| "learning_rate": 0.0001, |
| "loss": 8.1025, |
| "loss/crossentropy": 2.2917901635169984, |
| "loss/hidden": 3.558984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22450571469962596, |
| "step": 5580 |
| }, |
| { |
| "epoch": 0.559, |
| "grad_norm": 38.5, |
| "grad_norm_var": 21.97890625, |
| "learning_rate": 0.0001, |
| "loss": 8.2385, |
| "loss/crossentropy": 2.3675080120563505, |
| "loss/hidden": 3.63828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23681335002183915, |
| "step": 5590 |
| }, |
| { |
| "epoch": 0.56, |
| "grad_norm": 33.0, |
| "grad_norm_var": 9.764583333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.1467, |
| "loss/crossentropy": 2.3383423417806624, |
| "loss/hidden": 3.623046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23495447412133216, |
| "step": 5600 |
| }, |
| { |
| "epoch": 0.561, |
| "grad_norm": 34.25, |
| "grad_norm_var": 11.692643229166666, |
| "learning_rate": 0.0001, |
| "loss": 8.1075, |
| "loss/crossentropy": 2.2219532161951063, |
| "loss/hidden": 3.581640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21968780737370253, |
| "step": 5610 |
| }, |
| { |
| "epoch": 0.562, |
| "grad_norm": 29.875, |
| "grad_norm_var": 19.882747395833334, |
| "learning_rate": 0.0001, |
| "loss": 8.0262, |
| "loss/crossentropy": 2.121725457906723, |
| "loss/hidden": 3.564453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22616409026086332, |
| "step": 5620 |
| }, |
| { |
| "epoch": 0.563, |
| "grad_norm": 31.625, |
| "grad_norm_var": 1.869627140463989e+18, |
| "learning_rate": 0.0001, |
| "loss": 8.2844, |
| "loss/crossentropy": 2.2004577577114106, |
| "loss/hidden": 3.651171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22547926437109708, |
| "step": 5630 |
| }, |
| { |
| "epoch": 0.564, |
| "grad_norm": 32.0, |
| "grad_norm_var": 4.611458333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.1244, |
| "loss/crossentropy": 2.1516154944896697, |
| "loss/hidden": 3.55078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2172885647043586, |
| "step": 5640 |
| }, |
| { |
| "epoch": 0.565, |
| "grad_norm": 30.5, |
| "grad_norm_var": 7.317708333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.0514, |
| "loss/crossentropy": 2.2103342309594156, |
| "loss/hidden": 3.6140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22790345773100854, |
| "step": 5650 |
| }, |
| { |
| "epoch": 0.566, |
| "grad_norm": 34.25, |
| "grad_norm_var": 13.6587890625, |
| "learning_rate": 0.0001, |
| "loss": 8.0864, |
| "loss/crossentropy": 2.2035325288772585, |
| "loss/hidden": 3.57734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23142583221197127, |
| "step": 5660 |
| }, |
| { |
| "epoch": 0.567, |
| "grad_norm": 31.625, |
| "grad_norm_var": 9.45625, |
| "learning_rate": 0.0001, |
| "loss": 8.0716, |
| "loss/crossentropy": 2.1144707940518854, |
| "loss/hidden": 3.555078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21162977050989867, |
| "step": 5670 |
| }, |
| { |
| "epoch": 0.568, |
| "grad_norm": 32.5, |
| "grad_norm_var": 31.39140625, |
| "learning_rate": 0.0001, |
| "loss": 8.096, |
| "loss/crossentropy": 2.1229008197784425, |
| "loss/hidden": 3.554296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2217079123482108, |
| "step": 5680 |
| }, |
| { |
| "epoch": 0.569, |
| "grad_norm": 35.5, |
| "grad_norm_var": 30.71875, |
| "learning_rate": 0.0001, |
| "loss": 8.0824, |
| "loss/crossentropy": 2.1256051540374754, |
| "loss/hidden": 3.53046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22356193587183953, |
| "step": 5690 |
| }, |
| { |
| "epoch": 0.57, |
| "grad_norm": 30.125, |
| "grad_norm_var": 6.362239583333333, |
| "learning_rate": 0.0001, |
| "loss": 8.1334, |
| "loss/crossentropy": 2.1438381403684614, |
| "loss/hidden": 3.51875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2077854923903942, |
| "step": 5700 |
| }, |
| { |
| "epoch": 0.571, |
| "grad_norm": 32.5, |
| "grad_norm_var": 14.85390625, |
| "learning_rate": 0.0001, |
| "loss": 8.0718, |
| "loss/crossentropy": 2.2198398813605307, |
| "loss/hidden": 3.580859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22509159836918116, |
| "step": 5710 |
| }, |
| { |
| "epoch": 0.572, |
| "grad_norm": 34.75, |
| "grad_norm_var": 13.4759765625, |
| "learning_rate": 0.0001, |
| "loss": 8.0583, |
| "loss/crossentropy": 2.4060455739498137, |
| "loss/hidden": 3.708203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.25263102930039166, |
| "step": 5720 |
| }, |
| { |
| "epoch": 0.573, |
| "grad_norm": 28.625, |
| "grad_norm_var": 6.946809895833334, |
| "learning_rate": 0.0001, |
| "loss": 8.0726, |
| "loss/crossentropy": 2.1238791063427924, |
| "loss/hidden": 3.578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21989983841776847, |
| "step": 5730 |
| }, |
| { |
| "epoch": 0.574, |
| "grad_norm": 33.75, |
| "grad_norm_var": 9.4884765625, |
| "learning_rate": 0.0001, |
| "loss": 8.1374, |
| "loss/crossentropy": 2.1622555539011956, |
| "loss/hidden": 3.583203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21868109367787839, |
| "step": 5740 |
| }, |
| { |
| "epoch": 0.575, |
| "grad_norm": 33.0, |
| "grad_norm_var": 16.939518229166666, |
| "learning_rate": 0.0001, |
| "loss": 7.9816, |
| "loss/crossentropy": 2.203578273952007, |
| "loss/hidden": 3.57109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22708230018615722, |
| "step": 5750 |
| }, |
| { |
| "epoch": 0.576, |
| "grad_norm": 33.5, |
| "grad_norm_var": 7.49765625, |
| "learning_rate": 0.0001, |
| "loss": 8.0534, |
| "loss/crossentropy": 2.0832700729370117, |
| "loss/hidden": 3.5890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2166902620345354, |
| "step": 5760 |
| }, |
| { |
| "epoch": 0.577, |
| "grad_norm": 38.5, |
| "grad_norm_var": 7.069205729166667, |
| "learning_rate": 0.0001, |
| "loss": 8.1567, |
| "loss/crossentropy": 2.1000006228685377, |
| "loss/hidden": 3.6328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2189541209489107, |
| "step": 5770 |
| }, |
| { |
| "epoch": 0.578, |
| "grad_norm": 34.0, |
| "grad_norm_var": 10.34140625, |
| "learning_rate": 0.0001, |
| "loss": 7.9902, |
| "loss/crossentropy": 2.2755674168467523, |
| "loss/hidden": 3.5046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2158666817471385, |
| "step": 5780 |
| }, |
| { |
| "epoch": 0.579, |
| "grad_norm": 31.0, |
| "grad_norm_var": 7.744791666666667, |
| "learning_rate": 0.0001, |
| "loss": 8.049, |
| "loss/crossentropy": 2.3444782361388206, |
| "loss/hidden": 3.565625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22829483300447465, |
| "step": 5790 |
| }, |
| { |
| "epoch": 0.58, |
| "grad_norm": 31.75, |
| "grad_norm_var": 6.076497395833333, |
| "learning_rate": 0.0001, |
| "loss": 8.0271, |
| "loss/crossentropy": 2.202429711073637, |
| "loss/hidden": 3.4640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21492673214524985, |
| "step": 5800 |
| }, |
| { |
| "epoch": 0.581, |
| "grad_norm": 31.0, |
| "grad_norm_var": 3.6811848958333333, |
| "learning_rate": 0.0001, |
| "loss": 7.8948, |
| "loss/crossentropy": 2.3126792818307877, |
| "loss/hidden": 3.46015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22085475884377956, |
| "step": 5810 |
| }, |
| { |
| "epoch": 0.582, |
| "grad_norm": 33.0, |
| "grad_norm_var": 3.542708333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.1146, |
| "loss/crossentropy": 2.264694780111313, |
| "loss/hidden": 3.47421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21074176728725433, |
| "step": 5820 |
| }, |
| { |
| "epoch": 0.583, |
| "grad_norm": 31.5, |
| "grad_norm_var": 2.3452473958333333, |
| "learning_rate": 0.0001, |
| "loss": 8.079, |
| "loss/crossentropy": 2.2641124561429025, |
| "loss/hidden": 3.5125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22780102472752334, |
| "step": 5830 |
| }, |
| { |
| "epoch": 0.584, |
| "grad_norm": 32.5, |
| "grad_norm_var": 14.5947265625, |
| "learning_rate": 0.0001, |
| "loss": 8.0337, |
| "loss/crossentropy": 2.218563383817673, |
| "loss/hidden": 3.62734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24179403018206358, |
| "step": 5840 |
| }, |
| { |
| "epoch": 0.585, |
| "grad_norm": 32.75, |
| "grad_norm_var": 3.1705729166666665, |
| "learning_rate": 0.0001, |
| "loss": 8.0352, |
| "loss/crossentropy": 2.2127060025930403, |
| "loss/hidden": 3.51015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22846792116761208, |
| "step": 5850 |
| }, |
| { |
| "epoch": 0.586, |
| "grad_norm": 31.0, |
| "grad_norm_var": 5.418489583333334, |
| "learning_rate": 0.0001, |
| "loss": 8.0163, |
| "loss/crossentropy": 2.2808433353900908, |
| "loss/hidden": 3.579296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23100998885929586, |
| "step": 5860 |
| }, |
| { |
| "epoch": 0.587, |
| "grad_norm": 33.75, |
| "grad_norm_var": 7.083333333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.0979, |
| "loss/crossentropy": 2.335478585958481, |
| "loss/hidden": 3.541015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2336500260978937, |
| "step": 5870 |
| }, |
| { |
| "epoch": 0.588, |
| "grad_norm": 31.5, |
| "grad_norm_var": 46.71243489583333, |
| "learning_rate": 0.0001, |
| "loss": 8.065, |
| "loss/crossentropy": 2.1413629055023193, |
| "loss/hidden": 3.46171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21667953655123712, |
| "step": 5880 |
| }, |
| { |
| "epoch": 0.589, |
| "grad_norm": 33.25, |
| "grad_norm_var": 90.58515625, |
| "learning_rate": 0.0001, |
| "loss": 8.1265, |
| "loss/crossentropy": 2.3575997933745385, |
| "loss/hidden": 3.5171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23772844970226287, |
| "step": 5890 |
| }, |
| { |
| "epoch": 0.59, |
| "grad_norm": 42.25, |
| "grad_norm_var": 108.78020833333333, |
| "learning_rate": 0.0001, |
| "loss": 8.1898, |
| "loss/crossentropy": 2.31534286737442, |
| "loss/hidden": 3.5765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23516609705984592, |
| "step": 5900 |
| }, |
| { |
| "epoch": 0.591, |
| "grad_norm": 30.125, |
| "grad_norm_var": 96.6228515625, |
| "learning_rate": 0.0001, |
| "loss": 8.2098, |
| "loss/crossentropy": 2.3408817887306212, |
| "loss/hidden": 3.58515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.234616519510746, |
| "step": 5910 |
| }, |
| { |
| "epoch": 0.592, |
| "grad_norm": 34.5, |
| "grad_norm_var": 5.761458333333334, |
| "learning_rate": 0.0001, |
| "loss": 8.0412, |
| "loss/crossentropy": 2.3171244740486143, |
| "loss/hidden": 3.690234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24309027940034866, |
| "step": 5920 |
| }, |
| { |
| "epoch": 0.593, |
| "grad_norm": 31.875, |
| "grad_norm_var": 18.249934895833334, |
| "learning_rate": 0.0001, |
| "loss": 8.2448, |
| "loss/crossentropy": 2.3113586097955703, |
| "loss/hidden": 3.537890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2302385514602065, |
| "step": 5930 |
| }, |
| { |
| "epoch": 0.594, |
| "grad_norm": 29.625, |
| "grad_norm_var": 16.43125, |
| "learning_rate": 0.0001, |
| "loss": 7.9336, |
| "loss/crossentropy": 2.325835222005844, |
| "loss/hidden": 3.434765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21180371306836604, |
| "step": 5940 |
| }, |
| { |
| "epoch": 0.595, |
| "grad_norm": 30.5, |
| "grad_norm_var": 10.299739583333333, |
| "learning_rate": 0.0001, |
| "loss": 7.9796, |
| "loss/crossentropy": 2.0509180039167405, |
| "loss/hidden": 3.604296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2253203097730875, |
| "step": 5950 |
| }, |
| { |
| "epoch": 0.596, |
| "grad_norm": 35.5, |
| "grad_norm_var": 2.948958333333333, |
| "learning_rate": 0.0001, |
| "loss": 8.0105, |
| "loss/crossentropy": 2.320768731832504, |
| "loss/hidden": 3.509375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2229237537831068, |
| "step": 5960 |
| }, |
| { |
| "epoch": 0.597, |
| "grad_norm": 28.375, |
| "grad_norm_var": 5.462434895833334, |
| "learning_rate": 0.0001, |
| "loss": 7.9407, |
| "loss/crossentropy": 2.3377193987369536, |
| "loss/hidden": 3.551171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22489294074475766, |
| "step": 5970 |
| }, |
| { |
| "epoch": 0.598, |
| "grad_norm": 37.25, |
| "grad_norm_var": 10.767643229166667, |
| "learning_rate": 0.0001, |
| "loss": 7.9551, |
| "loss/crossentropy": 2.26409173309803, |
| "loss/hidden": 3.5734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23901313543319702, |
| "step": 5980 |
| }, |
| { |
| "epoch": 0.599, |
| "grad_norm": 35.0, |
| "grad_norm_var": 14.5375, |
| "learning_rate": 0.0001, |
| "loss": 7.8592, |
| "loss/crossentropy": 2.2190980166196823, |
| "loss/hidden": 3.554296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22315293960273266, |
| "step": 5990 |
| }, |
| { |
| "epoch": 0.6, |
| "grad_norm": 31.0, |
| "grad_norm_var": 11.592643229166667, |
| "learning_rate": 0.0001, |
| "loss": 8.0295, |
| "loss/crossentropy": 2.3040059447288512, |
| "loss/hidden": 3.491015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22013801857829093, |
| "step": 6000 |
| }, |
| { |
| "epoch": 0.601, |
| "grad_norm": 35.25, |
| "grad_norm_var": 3.64375, |
| "learning_rate": 9.999861209401554e-05, |
| "loss": 8.0943, |
| "loss/crossentropy": 1.9778777174651623, |
| "loss/hidden": 3.649609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2236777282319963, |
| "step": 6010 |
| }, |
| { |
| "epoch": 0.602, |
| "grad_norm": 31.875, |
| "grad_norm_var": 15.4681640625, |
| "learning_rate": 9.999444846167473e-05, |
| "loss": 8.0478, |
| "loss/crossentropy": 2.1667948998510838, |
| "loss/hidden": 3.52265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21487980112433433, |
| "step": 6020 |
| }, |
| { |
| "epoch": 0.603, |
| "grad_norm": 33.75, |
| "grad_norm_var": 6.246875, |
| "learning_rate": 9.998750935981003e-05, |
| "loss": 7.896, |
| "loss/crossentropy": 2.338212412595749, |
| "loss/hidden": 3.546484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24026264138519765, |
| "step": 6030 |
| }, |
| { |
| "epoch": 0.604, |
| "grad_norm": 34.75, |
| "grad_norm_var": 19.437434895833334, |
| "learning_rate": 9.997779521645793e-05, |
| "loss": 8.0332, |
| "loss/crossentropy": 2.107576106488705, |
| "loss/hidden": 3.59296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22513998029753565, |
| "step": 6040 |
| }, |
| { |
| "epoch": 0.605, |
| "grad_norm": 31.625, |
| "grad_norm_var": 18.728580729166666, |
| "learning_rate": 9.996530663083255e-05, |
| "loss": 8.0406, |
| "loss/crossentropy": 2.15678728222847, |
| "loss/hidden": 3.5578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22172329109162092, |
| "step": 6050 |
| }, |
| { |
| "epoch": 0.606, |
| "grad_norm": 35.75, |
| "grad_norm_var": 4.276041666666667, |
| "learning_rate": 9.995004437328867e-05, |
| "loss": 8.0676, |
| "loss/crossentropy": 2.1027655113488435, |
| "loss/hidden": 3.505078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2081753826700151, |
| "step": 6060 |
| }, |
| { |
| "epoch": 0.607, |
| "grad_norm": 33.0, |
| "grad_norm_var": 15.7822265625, |
| "learning_rate": 9.993200938527422e-05, |
| "loss": 7.9934, |
| "loss/crossentropy": 2.2116799533367155, |
| "loss/hidden": 3.478515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2213600728660822, |
| "step": 6070 |
| }, |
| { |
| "epoch": 0.608, |
| "grad_norm": 33.75, |
| "grad_norm_var": 16.8009765625, |
| "learning_rate": 9.991120277927223e-05, |
| "loss": 7.9668, |
| "loss/crossentropy": 2.317891779541969, |
| "loss/hidden": 3.463671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21628273129463196, |
| "step": 6080 |
| }, |
| { |
| "epoch": 0.609, |
| "grad_norm": 35.5, |
| "grad_norm_var": 40.41848958333333, |
| "learning_rate": 9.988762583873216e-05, |
| "loss": 8.1108, |
| "loss/crossentropy": 2.2937388941645622, |
| "loss/hidden": 3.57734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24392955824732782, |
| "step": 6090 |
| }, |
| { |
| "epoch": 0.61, |
| "grad_norm": 32.75, |
| "grad_norm_var": 45.71139322916667, |
| "learning_rate": 9.986128001799077e-05, |
| "loss": 7.9542, |
| "loss/crossentropy": 2.1343540251255035, |
| "loss/hidden": 3.569921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21609723400324582, |
| "step": 6100 |
| }, |
| { |
| "epoch": 0.611, |
| "grad_norm": 36.5, |
| "grad_norm_var": 15.710416666666667, |
| "learning_rate": 9.983216694218236e-05, |
| "loss": 8.0213, |
| "loss/crossentropy": 2.2557659655809403, |
| "loss/hidden": 3.57265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23297818191349506, |
| "step": 6110 |
| }, |
| { |
| "epoch": 0.612, |
| "grad_norm": 29.75, |
| "grad_norm_var": 5.277018229166667, |
| "learning_rate": 9.980028840713861e-05, |
| "loss": 7.9736, |
| "loss/crossentropy": 2.266719642281532, |
| "loss/hidden": 3.5078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23359888847917318, |
| "step": 6120 |
| }, |
| { |
| "epoch": 0.613, |
| "grad_norm": 32.5, |
| "grad_norm_var": 5.777083333333334, |
| "learning_rate": 9.97656463792777e-05, |
| "loss": 8.0245, |
| "loss/crossentropy": 2.160386872291565, |
| "loss/hidden": 3.416015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2051708722487092, |
| "step": 6130 |
| }, |
| { |
| "epoch": 0.614, |
| "grad_norm": 30.25, |
| "grad_norm_var": 3.147330729166667, |
| "learning_rate": 9.97282429954831e-05, |
| "loss": 7.9544, |
| "loss/crossentropy": 2.3059318214654922, |
| "loss/hidden": 3.55234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22518980633467436, |
| "step": 6140 |
| }, |
| { |
| "epoch": 0.615, |
| "grad_norm": 35.5, |
| "grad_norm_var": 4.796875, |
| "learning_rate": 9.96880805629717e-05, |
| "loss": 8.0717, |
| "loss/crossentropy": 2.1113718140870334, |
| "loss/hidden": 3.504296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2189197548199445, |
| "step": 6150 |
| }, |
| { |
| "epoch": 0.616, |
| "grad_norm": 36.0, |
| "grad_norm_var": 5.901822916666666, |
| "learning_rate": 9.964516155915151e-05, |
| "loss": 8.0206, |
| "loss/crossentropy": 2.2431884676218035, |
| "loss/hidden": 3.5359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22374145314097404, |
| "step": 6160 |
| }, |
| { |
| "epoch": 0.617, |
| "grad_norm": 32.5, |
| "grad_norm_var": 7.320247395833333, |
| "learning_rate": 9.959948863146887e-05, |
| "loss": 8.1478, |
| "loss/crossentropy": 2.226063944399357, |
| "loss/hidden": 3.57265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23062228299677373, |
| "step": 6170 |
| }, |
| { |
| "epoch": 0.618, |
| "grad_norm": 32.0, |
| "grad_norm_var": 4.3009765625, |
| "learning_rate": 9.95510645972451e-05, |
| "loss": 8.0136, |
| "loss/crossentropy": 2.3054941266775133, |
| "loss/hidden": 3.554296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2221404740586877, |
| "step": 6180 |
| }, |
| { |
| "epoch": 0.619, |
| "grad_norm": 30.375, |
| "grad_norm_var": 4.343489583333334, |
| "learning_rate": 9.949989244350271e-05, |
| "loss": 8.0065, |
| "loss/crossentropy": 2.243282663822174, |
| "loss/hidden": 3.56328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22724493965506554, |
| "step": 6190 |
| }, |
| { |
| "epoch": 0.62, |
| "grad_norm": 32.25, |
| "grad_norm_var": 3.2697265625, |
| "learning_rate": 9.94459753267812e-05, |
| "loss": 7.9606, |
| "loss/crossentropy": 2.209024667739868, |
| "loss/hidden": 3.48125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2209360484033823, |
| "step": 6200 |
| }, |
| { |
| "epoch": 0.621, |
| "grad_norm": 31.75, |
| "grad_norm_var": 3.57890625, |
| "learning_rate": 9.93893165729423e-05, |
| "loss": 8.0263, |
| "loss/crossentropy": 2.249543938040733, |
| "loss/hidden": 3.55625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21819613687694073, |
| "step": 6210 |
| }, |
| { |
| "epoch": 0.622, |
| "grad_norm": 35.5, |
| "grad_norm_var": 2.4344770438731136e+18, |
| "learning_rate": 9.932991967696483e-05, |
| "loss": 8.15, |
| "loss/crossentropy": 2.268956796824932, |
| "loss/hidden": 3.45, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21720595508813859, |
| "step": 6220 |
| }, |
| { |
| "epoch": 0.623, |
| "grad_norm": 32.5, |
| "grad_norm_var": 2.4344770473252357e+18, |
| "learning_rate": 9.926778830272912e-05, |
| "loss": 7.8604, |
| "loss/crossentropy": 2.275849539041519, |
| "loss/hidden": 3.48046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20637927446514368, |
| "step": 6230 |
| }, |
| { |
| "epoch": 0.624, |
| "grad_norm": 34.0, |
| "grad_norm_var": 7.830208333333333, |
| "learning_rate": 9.920292628279099e-05, |
| "loss": 8.1704, |
| "loss/crossentropy": 2.3654884546995163, |
| "loss/hidden": 3.56953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24397928323596715, |
| "step": 6240 |
| }, |
| { |
| "epoch": 0.625, |
| "grad_norm": 33.5, |
| "grad_norm_var": 13.568489583333333, |
| "learning_rate": 9.913533761814537e-05, |
| "loss": 7.9571, |
| "loss/crossentropy": 2.158632145822048, |
| "loss/hidden": 3.58671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24105301443487406, |
| "step": 6250 |
| }, |
| { |
| "epoch": 0.626, |
| "grad_norm": 30.625, |
| "grad_norm_var": 19.718489583333334, |
| "learning_rate": 9.906502647797946e-05, |
| "loss": 8.014, |
| "loss/crossentropy": 2.4216948479413984, |
| "loss/hidden": 3.524609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23831300530582666, |
| "step": 6260 |
| }, |
| { |
| "epoch": 0.627, |
| "grad_norm": 33.25, |
| "grad_norm_var": 5.412434895833333, |
| "learning_rate": 9.899199719941559e-05, |
| "loss": 7.9918, |
| "loss/crossentropy": 2.312904378771782, |
| "loss/hidden": 3.4046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2172198722139001, |
| "step": 6270 |
| }, |
| { |
| "epoch": 0.628, |
| "grad_norm": 30.75, |
| "grad_norm_var": 13.770247395833334, |
| "learning_rate": 9.891625428724363e-05, |
| "loss": 7.8961, |
| "loss/crossentropy": 2.1163319811224937, |
| "loss/hidden": 3.453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2137595346197486, |
| "step": 6280 |
| }, |
| { |
| "epoch": 0.629, |
| "grad_norm": 40.0, |
| "grad_norm_var": 17.7884765625, |
| "learning_rate": 9.883780241364324e-05, |
| "loss": 7.9233, |
| "loss/crossentropy": 2.1940839856863024, |
| "loss/hidden": 3.467578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22429651636630296, |
| "step": 6290 |
| }, |
| { |
| "epoch": 0.63, |
| "grad_norm": 34.75, |
| "grad_norm_var": 10.233333333333333, |
| "learning_rate": 9.875664641789545e-05, |
| "loss": 7.9085, |
| "loss/crossentropy": 2.227154319733381, |
| "loss/hidden": 3.551171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20596644701436162, |
| "step": 6300 |
| }, |
| { |
| "epoch": 0.631, |
| "grad_norm": 38.0, |
| "grad_norm_var": 7.104622395833333, |
| "learning_rate": 9.867279130608441e-05, |
| "loss": 7.9829, |
| "loss/crossentropy": 2.035653126239777, |
| "loss/hidden": 3.515234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22415168713778258, |
| "step": 6310 |
| }, |
| { |
| "epoch": 0.632, |
| "grad_norm": 35.0, |
| "grad_norm_var": 23.770572916666666, |
| "learning_rate": 9.858624225078841e-05, |
| "loss": 8.0008, |
| "loss/crossentropy": 2.1803553849458694, |
| "loss/hidden": 3.42890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2177688382565975, |
| "step": 6320 |
| }, |
| { |
| "epoch": 0.633, |
| "grad_norm": 31.125, |
| "grad_norm_var": 11.841666666666667, |
| "learning_rate": 9.849700459076084e-05, |
| "loss": 8.0075, |
| "loss/crossentropy": 2.176757436990738, |
| "loss/hidden": 3.520703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21793113015592097, |
| "step": 6330 |
| }, |
| { |
| "epoch": 0.634, |
| "grad_norm": 32.5, |
| "grad_norm_var": 22.920572916666668, |
| "learning_rate": 9.840508383060093e-05, |
| "loss": 7.9591, |
| "loss/crossentropy": 2.1699598588049414, |
| "loss/hidden": 3.510546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22454311894252896, |
| "step": 6340 |
| }, |
| { |
| "epoch": 0.635, |
| "grad_norm": 31.25, |
| "grad_norm_var": 13.63515625, |
| "learning_rate": 9.831048564041413e-05, |
| "loss": 8.0201, |
| "loss/crossentropy": 2.1979137234389783, |
| "loss/hidden": 3.4484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20997896678745748, |
| "step": 6350 |
| }, |
| { |
| "epoch": 0.636, |
| "grad_norm": 29.5, |
| "grad_norm_var": 4.823893229166667, |
| "learning_rate": 9.821321585546244e-05, |
| "loss": 7.999, |
| "loss/crossentropy": 2.2549417853355407, |
| "loss/hidden": 3.488671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21141785085201265, |
| "step": 6360 |
| }, |
| { |
| "epoch": 0.637, |
| "grad_norm": 39.25, |
| "grad_norm_var": 12.4650390625, |
| "learning_rate": 9.811328047580437e-05, |
| "loss": 8.0388, |
| "loss/crossentropy": 2.239156422019005, |
| "loss/hidden": 3.60625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.24648461528122426, |
| "step": 6370 |
| }, |
| { |
| "epoch": 0.638, |
| "grad_norm": 47.25, |
| "grad_norm_var": 25.014322916666668, |
| "learning_rate": 9.801068566592485e-05, |
| "loss": 7.9025, |
| "loss/crossentropy": 2.1251075878739356, |
| "loss/hidden": 3.579296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21702531222254037, |
| "step": 6380 |
| }, |
| { |
| "epoch": 0.639, |
| "grad_norm": 30.5, |
| "grad_norm_var": 20.876041666666666, |
| "learning_rate": 9.790543775435505e-05, |
| "loss": 7.8769, |
| "loss/crossentropy": 2.0513882406055926, |
| "loss/hidden": 3.529296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20966944433748722, |
| "step": 6390 |
| }, |
| { |
| "epoch": 0.64, |
| "grad_norm": 31.875, |
| "grad_norm_var": 5.6416015625, |
| "learning_rate": 9.779754323328192e-05, |
| "loss": 7.9332, |
| "loss/crossentropy": 2.3918124705553057, |
| "loss/hidden": 3.5328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22760416194796562, |
| "step": 6400 |
| }, |
| { |
| "epoch": 0.641, |
| "grad_norm": 28.875, |
| "grad_norm_var": 7.597330729166667, |
| "learning_rate": 9.76870087581477e-05, |
| "loss": 7.9651, |
| "loss/crossentropy": 2.0368143267929555, |
| "loss/hidden": 3.55, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21210087668150662, |
| "step": 6410 |
| }, |
| { |
| "epoch": 0.642, |
| "grad_norm": 32.25, |
| "grad_norm_var": 9.109309895833333, |
| "learning_rate": 9.757384114723954e-05, |
| "loss": 8.0321, |
| "loss/crossentropy": 2.211759842187166, |
| "loss/hidden": 3.527734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22823408897966146, |
| "step": 6420 |
| }, |
| { |
| "epoch": 0.643, |
| "grad_norm": 28.375, |
| "grad_norm_var": 7.3775390625, |
| "learning_rate": 9.745804738126871e-05, |
| "loss": 7.9925, |
| "loss/crossentropy": 2.326525205373764, |
| "loss/hidden": 3.47109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22555426377803087, |
| "step": 6430 |
| }, |
| { |
| "epoch": 0.644, |
| "grad_norm": 35.75, |
| "grad_norm_var": 11.24375, |
| "learning_rate": 9.733963460294015e-05, |
| "loss": 7.9035, |
| "loss/crossentropy": 2.3097167551517486, |
| "loss/hidden": 3.581640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22632835302501916, |
| "step": 6440 |
| }, |
| { |
| "epoch": 0.645, |
| "grad_norm": 30.5, |
| "grad_norm_var": 8.674739583333333, |
| "learning_rate": 9.72186101165118e-05, |
| "loss": 7.946, |
| "loss/crossentropy": 2.008643839508295, |
| "loss/hidden": 3.56875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23042700625956059, |
| "step": 6450 |
| }, |
| { |
| "epoch": 0.646, |
| "grad_norm": 29.875, |
| "grad_norm_var": 4.911393229166666, |
| "learning_rate": 9.709498138734405e-05, |
| "loss": 8.0037, |
| "loss/crossentropy": 2.1890437103807927, |
| "loss/hidden": 3.444921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2202771688811481, |
| "step": 6460 |
| }, |
| { |
| "epoch": 0.647, |
| "grad_norm": 29.25, |
| "grad_norm_var": 3.709830729166667, |
| "learning_rate": 9.696875604143924e-05, |
| "loss": 7.8795, |
| "loss/crossentropy": 2.1602062940597535, |
| "loss/hidden": 3.576171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2190501932054758, |
| "step": 6470 |
| }, |
| { |
| "epoch": 0.648, |
| "grad_norm": 28.875, |
| "grad_norm_var": 11.384375, |
| "learning_rate": 9.683994186497132e-05, |
| "loss": 8.0117, |
| "loss/crossentropy": 2.2572616577148437, |
| "loss/hidden": 3.54296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21902738623321055, |
| "step": 6480 |
| }, |
| { |
| "epoch": 0.649, |
| "grad_norm": 33.75, |
| "grad_norm_var": 14.962239583333334, |
| "learning_rate": 9.670854680380544e-05, |
| "loss": 7.9967, |
| "loss/crossentropy": 2.2480128668248653, |
| "loss/hidden": 3.46875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21361452052369714, |
| "step": 6490 |
| }, |
| { |
| "epoch": 0.65, |
| "grad_norm": 30.5, |
| "grad_norm_var": 13.709309895833334, |
| "learning_rate": 9.657457896300791e-05, |
| "loss": 7.8606, |
| "loss/crossentropy": 2.3608870059251785, |
| "loss/hidden": 3.43671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2066267790272832, |
| "step": 6500 |
| }, |
| { |
| "epoch": 0.651, |
| "grad_norm": 31.25, |
| "grad_norm_var": 6.6869140625, |
| "learning_rate": 9.643804660634618e-05, |
| "loss": 7.8916, |
| "loss/crossentropy": 2.22277874648571, |
| "loss/hidden": 3.583984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2291982416063547, |
| "step": 6510 |
| }, |
| { |
| "epoch": 0.652, |
| "grad_norm": 33.0, |
| "grad_norm_var": 12.510416666666666, |
| "learning_rate": 9.629895815577916e-05, |
| "loss": 7.8982, |
| "loss/crossentropy": 2.181501531600952, |
| "loss/hidden": 3.487109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2111635560169816, |
| "step": 6520 |
| }, |
| { |
| "epoch": 0.653, |
| "grad_norm": 33.25, |
| "grad_norm_var": 6.308072916666666, |
| "learning_rate": 9.615732219093762e-05, |
| "loss": 7.8706, |
| "loss/crossentropy": 2.2949966207146644, |
| "loss/hidden": 3.447265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21308735199272633, |
| "step": 6530 |
| }, |
| { |
| "epoch": 0.654, |
| "grad_norm": 31.125, |
| "grad_norm_var": 7.6837890625, |
| "learning_rate": 9.601314744859504e-05, |
| "loss": 7.9133, |
| "loss/crossentropy": 2.3161712542176245, |
| "loss/hidden": 3.4296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21109349709004163, |
| "step": 6540 |
| }, |
| { |
| "epoch": 0.655, |
| "grad_norm": 29.375, |
| "grad_norm_var": 12.726822916666666, |
| "learning_rate": 9.586644282212866e-05, |
| "loss": 8.0585, |
| "loss/crossentropy": 2.472279852628708, |
| "loss/hidden": 3.501171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2305774724110961, |
| "step": 6550 |
| }, |
| { |
| "epoch": 0.656, |
| "grad_norm": 31.875, |
| "grad_norm_var": 23.931184895833333, |
| "learning_rate": 9.571721736097089e-05, |
| "loss": 8.1202, |
| "loss/crossentropy": 2.19906941652298, |
| "loss/hidden": 3.46640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2118291700258851, |
| "step": 6560 |
| }, |
| { |
| "epoch": 0.657, |
| "grad_norm": 30.5, |
| "grad_norm_var": 16.205989583333334, |
| "learning_rate": 9.556548027005106e-05, |
| "loss": 7.9494, |
| "loss/crossentropy": 2.2343359500169755, |
| "loss/hidden": 3.546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22837236057966948, |
| "step": 6570 |
| }, |
| { |
| "epoch": 0.658, |
| "grad_norm": 36.75, |
| "grad_norm_var": 7.095247395833334, |
| "learning_rate": 9.54112409092277e-05, |
| "loss": 8.0652, |
| "loss/crossentropy": 2.2837061405181887, |
| "loss/hidden": 3.6140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22944757621735334, |
| "step": 6580 |
| }, |
| { |
| "epoch": 0.659, |
| "grad_norm": 33.0, |
| "grad_norm_var": 8.152018229166666, |
| "learning_rate": 9.525450879271113e-05, |
| "loss": 7.864, |
| "loss/crossentropy": 2.1913695335388184, |
| "loss/hidden": 3.40859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20043510757386684, |
| "step": 6590 |
| }, |
| { |
| "epoch": 0.66, |
| "grad_norm": 31.125, |
| "grad_norm_var": 7.634309895833334, |
| "learning_rate": 9.509529358847655e-05, |
| "loss": 7.8478, |
| "loss/crossentropy": 2.260852278769016, |
| "loss/hidden": 3.473046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21337826307862998, |
| "step": 6600 |
| }, |
| { |
| "epoch": 0.661, |
| "grad_norm": 31.875, |
| "grad_norm_var": 2.4364583333333334, |
| "learning_rate": 9.493360511766776e-05, |
| "loss": 8.002, |
| "loss/crossentropy": 2.278784583508968, |
| "loss/hidden": 3.47109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21644247248768805, |
| "step": 6610 |
| }, |
| { |
| "epoch": 0.662, |
| "grad_norm": 32.0, |
| "grad_norm_var": 2.2604166666666665, |
| "learning_rate": 9.476945335399122e-05, |
| "loss": 7.8403, |
| "loss/crossentropy": 2.305820995569229, |
| "loss/hidden": 3.46796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2129553822800517, |
| "step": 6620 |
| }, |
| { |
| "epoch": 0.663, |
| "grad_norm": 29.875, |
| "grad_norm_var": 1.9775390625, |
| "learning_rate": 9.460284842310094e-05, |
| "loss": 7.8949, |
| "loss/crossentropy": 2.3204252138733863, |
| "loss/hidden": 3.68359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23965017758309842, |
| "step": 6630 |
| }, |
| { |
| "epoch": 0.664, |
| "grad_norm": 46.75, |
| "grad_norm_var": 19.216666666666665, |
| "learning_rate": 9.443380060197387e-05, |
| "loss": 8.0113, |
| "loss/crossentropy": 2.4102306962013245, |
| "loss/hidden": 3.605078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23021724484860898, |
| "step": 6640 |
| }, |
| { |
| "epoch": 0.665, |
| "grad_norm": 31.875, |
| "grad_norm_var": 19.167643229166668, |
| "learning_rate": 9.426232031827588e-05, |
| "loss": 7.966, |
| "loss/crossentropy": 2.0162095427513123, |
| "loss/hidden": 3.461328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20972911100834607, |
| "step": 6650 |
| }, |
| { |
| "epoch": 0.666, |
| "grad_norm": 31.0, |
| "grad_norm_var": 6.491666666666666, |
| "learning_rate": 9.408841814971861e-05, |
| "loss": 7.842, |
| "loss/crossentropy": 2.2311264127492905, |
| "loss/hidden": 3.528125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22341707665473223, |
| "step": 6660 |
| }, |
| { |
| "epoch": 0.667, |
| "grad_norm": 30.875, |
| "grad_norm_var": 20.850455729166665, |
| "learning_rate": 9.391210482340699e-05, |
| "loss": 8.0171, |
| "loss/crossentropy": 2.232379700243473, |
| "loss/hidden": 3.530859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22980675138533116, |
| "step": 6670 |
| }, |
| { |
| "epoch": 0.668, |
| "grad_norm": 29.75, |
| "grad_norm_var": 3.72265625, |
| "learning_rate": 9.373339121517747e-05, |
| "loss": 7.7597, |
| "loss/crossentropy": 2.245617154240608, |
| "loss/hidden": 3.379296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20726991835981606, |
| "step": 6680 |
| }, |
| { |
| "epoch": 0.669, |
| "grad_norm": 29.25, |
| "grad_norm_var": 9.530989583333334, |
| "learning_rate": 9.355228834892724e-05, |
| "loss": 7.8524, |
| "loss/crossentropy": 2.254352422058582, |
| "loss/hidden": 3.52890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22110722698271273, |
| "step": 6690 |
| }, |
| { |
| "epoch": 0.67, |
| "grad_norm": 30.125, |
| "grad_norm_var": 11.1447265625, |
| "learning_rate": 9.336880739593416e-05, |
| "loss": 7.8409, |
| "loss/crossentropy": 2.2463474199175835, |
| "loss/hidden": 3.501171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21668503917753695, |
| "step": 6700 |
| }, |
| { |
| "epoch": 0.671, |
| "grad_norm": 27.625, |
| "grad_norm_var": 16.1822265625, |
| "learning_rate": 9.318295967416768e-05, |
| "loss": 7.8072, |
| "loss/crossentropy": 2.2114156976342203, |
| "loss/hidden": 3.390234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2030044339597225, |
| "step": 6710 |
| }, |
| { |
| "epoch": 0.672, |
| "grad_norm": 29.875, |
| "grad_norm_var": 17.50390625, |
| "learning_rate": 9.299475664759069e-05, |
| "loss": 7.8587, |
| "loss/crossentropy": 2.233850121498108, |
| "loss/hidden": 3.415625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21453158278018236, |
| "step": 6720 |
| }, |
| { |
| "epoch": 0.673, |
| "grad_norm": 32.0, |
| "grad_norm_var": 9.415625, |
| "learning_rate": 9.280420992545239e-05, |
| "loss": 7.7826, |
| "loss/crossentropy": 2.2446775883436203, |
| "loss/hidden": 3.443359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2181243021041155, |
| "step": 6730 |
| }, |
| { |
| "epoch": 0.674, |
| "grad_norm": 31.5, |
| "grad_norm_var": 2.9622395833333335, |
| "learning_rate": 9.261133126157218e-05, |
| "loss": 7.8484, |
| "loss/crossentropy": 2.286695083975792, |
| "loss/hidden": 3.55390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21459419634193183, |
| "step": 6740 |
| }, |
| { |
| "epoch": 0.675, |
| "grad_norm": 31.375, |
| "grad_norm_var": 1.5893229166666667, |
| "learning_rate": 9.241613255361455e-05, |
| "loss": 7.8822, |
| "loss/crossentropy": 2.1801554679870607, |
| "loss/hidden": 3.518359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22029934152960778, |
| "step": 6750 |
| }, |
| { |
| "epoch": 0.676, |
| "grad_norm": 29.375, |
| "grad_norm_var": 13.489322916666667, |
| "learning_rate": 9.221862584235528e-05, |
| "loss": 7.8426, |
| "loss/crossentropy": 2.1505274340510367, |
| "loss/hidden": 3.5609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2274538179859519, |
| "step": 6760 |
| }, |
| { |
| "epoch": 0.677, |
| "grad_norm": 39.0, |
| "grad_norm_var": 15.030143229166667, |
| "learning_rate": 9.20188233109387e-05, |
| "loss": 7.837, |
| "loss/crossentropy": 2.3796315863728523, |
| "loss/hidden": 3.42578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2178290465846658, |
| "step": 6770 |
| }, |
| { |
| "epoch": 0.678, |
| "grad_norm": 28.75, |
| "grad_norm_var": 19.102083333333333, |
| "learning_rate": 9.181673728412605e-05, |
| "loss": 7.8801, |
| "loss/crossentropy": 2.2974090576171875, |
| "loss/hidden": 3.3703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21345944330096245, |
| "step": 6780 |
| }, |
| { |
| "epoch": 0.679, |
| "grad_norm": 30.0, |
| "grad_norm_var": 1.4692057291666667, |
| "learning_rate": 9.161238022753543e-05, |
| "loss": 7.8258, |
| "loss/crossentropy": 2.314460426568985, |
| "loss/hidden": 3.46953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22287285774946214, |
| "step": 6790 |
| }, |
| { |
| "epoch": 0.68, |
| "grad_norm": 32.0, |
| "grad_norm_var": 2.5254557291666666, |
| "learning_rate": 9.140576474687264e-05, |
| "loss": 7.8657, |
| "loss/crossentropy": 2.1074823416769504, |
| "loss/hidden": 3.544140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2290237602777779, |
| "step": 6800 |
| }, |
| { |
| "epoch": 0.681, |
| "grad_norm": 27.875, |
| "grad_norm_var": 8.753059895833333, |
| "learning_rate": 9.11969035871538e-05, |
| "loss": 7.7752, |
| "loss/crossentropy": 2.0863692626357078, |
| "loss/hidden": 3.39296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20366193084046244, |
| "step": 6810 |
| }, |
| { |
| "epoch": 0.682, |
| "grad_norm": 29.875, |
| "grad_norm_var": 6.998893229166667, |
| "learning_rate": 9.098580963191908e-05, |
| "loss": 7.8898, |
| "loss/crossentropy": 2.2119090080261232, |
| "loss/hidden": 3.5390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2230028085410595, |
| "step": 6820 |
| }, |
| { |
| "epoch": 0.683, |
| "grad_norm": 28.0, |
| "grad_norm_var": 27.265559895833334, |
| "learning_rate": 9.077249590243796e-05, |
| "loss": 7.8278, |
| "loss/crossentropy": 2.314791253209114, |
| "loss/hidden": 3.441015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21508586294949056, |
| "step": 6830 |
| }, |
| { |
| "epoch": 0.684, |
| "grad_norm": 29.75, |
| "grad_norm_var": 2.4322265625, |
| "learning_rate": 9.055697555690608e-05, |
| "loss": 7.8087, |
| "loss/crossentropy": 2.1545565724372864, |
| "loss/hidden": 3.504296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2171786168590188, |
| "step": 6840 |
| }, |
| { |
| "epoch": 0.685, |
| "grad_norm": 34.5, |
| "grad_norm_var": 6.248372395833333, |
| "learning_rate": 9.033926188963352e-05, |
| "loss": 7.8663, |
| "loss/crossentropy": 2.3174977868795397, |
| "loss/hidden": 3.441015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21389166973531246, |
| "step": 6850 |
| }, |
| { |
| "epoch": 0.686, |
| "grad_norm": 31.625, |
| "grad_norm_var": 3.4760416666666667, |
| "learning_rate": 9.011936833022484e-05, |
| "loss": 7.812, |
| "loss/crossentropy": 2.1643209010362625, |
| "loss/hidden": 3.61015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2265725104138255, |
| "step": 6860 |
| }, |
| { |
| "epoch": 0.687, |
| "grad_norm": 46.0, |
| "grad_norm_var": 2.305350403439006e+18, |
| "learning_rate": 8.989730844275054e-05, |
| "loss": 7.93, |
| "loss/crossentropy": 2.2903329521417617, |
| "loss/hidden": 3.47890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21145460121333598, |
| "step": 6870 |
| }, |
| { |
| "epoch": 0.688, |
| "grad_norm": 29.5, |
| "grad_norm_var": 2.3053504035085965e+18, |
| "learning_rate": 8.967309592491052e-05, |
| "loss": 7.8319, |
| "loss/crossentropy": 2.1312880881130694, |
| "loss/hidden": 3.5453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.23307712767273187, |
| "step": 6880 |
| }, |
| { |
| "epoch": 0.689, |
| "grad_norm": 28.5, |
| "grad_norm_var": 1.8455729166666666, |
| "learning_rate": 8.944674460718897e-05, |
| "loss": 7.7086, |
| "loss/crossentropy": 2.212802681326866, |
| "loss/hidden": 3.41328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2138791922479868, |
| "step": 6890 |
| }, |
| { |
| "epoch": 0.69, |
| "grad_norm": 31.375, |
| "grad_norm_var": 36.62473958333333, |
| "learning_rate": 8.921826845200139e-05, |
| "loss": 7.6737, |
| "loss/crossentropy": 2.196607070416212, |
| "loss/hidden": 3.46875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21539878509938717, |
| "step": 6900 |
| }, |
| { |
| "epoch": 0.691, |
| "grad_norm": 30.5, |
| "grad_norm_var": 36.222330729166664, |
| "learning_rate": 8.898768155283327e-05, |
| "loss": 7.6532, |
| "loss/crossentropy": 1.9995386362075807, |
| "loss/hidden": 3.491796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19842760860919953, |
| "step": 6910 |
| }, |
| { |
| "epoch": 0.692, |
| "grad_norm": 34.0, |
| "grad_norm_var": 15.049739583333333, |
| "learning_rate": 8.875499813337069e-05, |
| "loss": 7.6989, |
| "loss/crossentropy": 2.1321763381361962, |
| "loss/hidden": 3.3546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20554839987307788, |
| "step": 6920 |
| }, |
| { |
| "epoch": 0.693, |
| "grad_norm": 34.5, |
| "grad_norm_var": 12.14140625, |
| "learning_rate": 8.852023254662299e-05, |
| "loss": 7.7572, |
| "loss/crossentropy": 2.294306147098541, |
| "loss/hidden": 3.38984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20451737083494664, |
| "step": 6930 |
| }, |
| { |
| "epoch": 0.694, |
| "grad_norm": 29.125, |
| "grad_norm_var": 3.5056640625, |
| "learning_rate": 8.828339927403745e-05, |
| "loss": 7.9034, |
| "loss/crossentropy": 2.4660038471221926, |
| "loss/hidden": 3.53828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22238681241869926, |
| "step": 6940 |
| }, |
| { |
| "epoch": 0.695, |
| "grad_norm": 29.875, |
| "grad_norm_var": 2.9291666666666667, |
| "learning_rate": 8.804451292460585e-05, |
| "loss": 7.667, |
| "loss/crossentropy": 2.2809097945690153, |
| "loss/hidden": 3.379296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21668226066976787, |
| "step": 6950 |
| }, |
| { |
| "epoch": 0.696, |
| "grad_norm": 43.0, |
| "grad_norm_var": 13.6650390625, |
| "learning_rate": 8.780358823396352e-05, |
| "loss": 7.8654, |
| "loss/crossentropy": 2.203066258132458, |
| "loss/hidden": 3.431640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20034241620451212, |
| "step": 6960 |
| }, |
| { |
| "epoch": 0.697, |
| "grad_norm": 29.375, |
| "grad_norm_var": 13.476822916666666, |
| "learning_rate": 8.756064006348021e-05, |
| "loss": 7.7268, |
| "loss/crossentropy": 2.3059852838516237, |
| "loss/hidden": 3.362109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20583362504839897, |
| "step": 6970 |
| }, |
| { |
| "epoch": 0.698, |
| "grad_norm": 29.75, |
| "grad_norm_var": 2.7666666666666666, |
| "learning_rate": 8.731568339934349e-05, |
| "loss": 7.6686, |
| "loss/crossentropy": 2.146751108765602, |
| "loss/hidden": 3.36171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20131726199761033, |
| "step": 6980 |
| }, |
| { |
| "epoch": 0.699, |
| "grad_norm": 78.0, |
| "grad_norm_var": 149.79973958333332, |
| "learning_rate": 8.706873335163425e-05, |
| "loss": 7.7995, |
| "loss/crossentropy": 2.1803457379341125, |
| "loss/hidden": 3.45546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20246275123208762, |
| "step": 6990 |
| }, |
| { |
| "epoch": 0.7, |
| "grad_norm": 30.875, |
| "grad_norm_var": 151.25, |
| "learning_rate": 8.681980515339464e-05, |
| "loss": 7.7526, |
| "loss/crossentropy": 2.2291737273335457, |
| "loss/hidden": 3.494921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22119418531656265, |
| "step": 7000 |
| }, |
| { |
| "epoch": 0.701, |
| "grad_norm": 29.25, |
| "grad_norm_var": 1.7181640625, |
| "learning_rate": 8.656891415968851e-05, |
| "loss": 7.746, |
| "loss/crossentropy": 2.1491103902459145, |
| "loss/hidden": 3.52421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2262929579243064, |
| "step": 7010 |
| }, |
| { |
| "epoch": 0.702, |
| "grad_norm": 29.625, |
| "grad_norm_var": 2.3692057291666666, |
| "learning_rate": 8.631607584665414e-05, |
| "loss": 7.8135, |
| "loss/crossentropy": 2.24861903488636, |
| "loss/hidden": 3.387890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21290562488138676, |
| "step": 7020 |
| }, |
| { |
| "epoch": 0.703, |
| "grad_norm": 28.375, |
| "grad_norm_var": 3.7874348958333335, |
| "learning_rate": 8.606130581054967e-05, |
| "loss": 7.7292, |
| "loss/crossentropy": 2.253491559624672, |
| "loss/hidden": 3.466015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21812653560191392, |
| "step": 7030 |
| }, |
| { |
| "epoch": 0.704, |
| "grad_norm": 28.25, |
| "grad_norm_var": 3.6302083333333335, |
| "learning_rate": 8.5804619766791e-05, |
| "loss": 7.6905, |
| "loss/crossentropy": 2.220169448852539, |
| "loss/hidden": 3.409765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2117680374532938, |
| "step": 7040 |
| }, |
| { |
| "epoch": 0.705, |
| "grad_norm": 28.5, |
| "grad_norm_var": 2.9921223958333334, |
| "learning_rate": 8.554603354898238e-05, |
| "loss": 7.7344, |
| "loss/crossentropy": 2.320455204695463, |
| "loss/hidden": 3.4328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2154739665798843, |
| "step": 7050 |
| }, |
| { |
| "epoch": 0.706, |
| "grad_norm": 29.125, |
| "grad_norm_var": 5.070572916666666, |
| "learning_rate": 8.52855631079398e-05, |
| "loss": 7.7402, |
| "loss/crossentropy": 2.145579095184803, |
| "loss/hidden": 3.522265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22706612963229417, |
| "step": 7060 |
| }, |
| { |
| "epoch": 0.707, |
| "grad_norm": 33.75, |
| "grad_norm_var": 6.292122395833333, |
| "learning_rate": 8.502322451070698e-05, |
| "loss": 7.6167, |
| "loss/crossentropy": 2.1853789925575255, |
| "loss/hidden": 3.398828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20946646444499492, |
| "step": 7070 |
| }, |
| { |
| "epoch": 0.708, |
| "grad_norm": 26.5, |
| "grad_norm_var": 2665.2580729166666, |
| "learning_rate": 8.475903393956434e-05, |
| "loss": 7.7827, |
| "loss/crossentropy": 2.153721308708191, |
| "loss/hidden": 3.465625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22963123507797717, |
| "step": 7080 |
| }, |
| { |
| "epoch": 0.709, |
| "grad_norm": 28.25, |
| "grad_norm_var": 2646.9572916666666, |
| "learning_rate": 8.449300769103072e-05, |
| "loss": 7.7356, |
| "loss/crossentropy": 2.3446192115545275, |
| "loss/hidden": 3.318359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20571632999926806, |
| "step": 7090 |
| }, |
| { |
| "epoch": 0.71, |
| "grad_norm": 28.0, |
| "grad_norm_var": 3.1264973958333333, |
| "learning_rate": 8.422516217485826e-05, |
| "loss": 7.6287, |
| "loss/crossentropy": 2.0742784813046455, |
| "loss/hidden": 3.4234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19434763304889202, |
| "step": 7100 |
| }, |
| { |
| "epoch": 0.711, |
| "grad_norm": 31.875, |
| "grad_norm_var": 11.266080729166667, |
| "learning_rate": 8.395551391302005e-05, |
| "loss": 7.7378, |
| "loss/crossentropy": 2.299386578798294, |
| "loss/hidden": 3.454296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21604814752936363, |
| "step": 7110 |
| }, |
| { |
| "epoch": 0.712, |
| "grad_norm": 26.0, |
| "grad_norm_var": 13.764322916666666, |
| "learning_rate": 8.368407953869104e-05, |
| "loss": 7.7025, |
| "loss/crossentropy": 2.3335316255688667, |
| "loss/hidden": 3.40859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20577862840145827, |
| "step": 7120 |
| }, |
| { |
| "epoch": 0.713, |
| "grad_norm": 31.25, |
| "grad_norm_var": 13.109309895833333, |
| "learning_rate": 8.3410875795222e-05, |
| "loss": 7.6393, |
| "loss/crossentropy": 2.1279921546578406, |
| "loss/hidden": 3.40078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1942439589649439, |
| "step": 7130 |
| }, |
| { |
| "epoch": 0.714, |
| "grad_norm": 29.25, |
| "grad_norm_var": 2.7275390625, |
| "learning_rate": 8.313591953510675e-05, |
| "loss": 7.5204, |
| "loss/crossentropy": 2.2679887428879737, |
| "loss/hidden": 3.31484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20306757390499114, |
| "step": 7140 |
| }, |
| { |
| "epoch": 0.715, |
| "grad_norm": 29.25, |
| "grad_norm_var": 1.6723307291666667, |
| "learning_rate": 8.285922771894254e-05, |
| "loss": 7.6251, |
| "loss/crossentropy": 2.279427632689476, |
| "loss/hidden": 3.371484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21383309215307236, |
| "step": 7150 |
| }, |
| { |
| "epoch": 0.716, |
| "grad_norm": 27.75, |
| "grad_norm_var": 1.8122395833333333, |
| "learning_rate": 8.258081741438395e-05, |
| "loss": 7.5708, |
| "loss/crossentropy": 2.2502959579229356, |
| "loss/hidden": 3.4046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20335796754807234, |
| "step": 7160 |
| }, |
| { |
| "epoch": 0.717, |
| "grad_norm": 28.875, |
| "grad_norm_var": 2.6822265625, |
| "learning_rate": 8.230070579508998e-05, |
| "loss": 7.6801, |
| "loss/crossentropy": 2.1685364373028277, |
| "loss/hidden": 3.380078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20438444875180722, |
| "step": 7170 |
| }, |
| { |
| "epoch": 0.718, |
| "grad_norm": 28.625, |
| "grad_norm_var": 3.81640625, |
| "learning_rate": 8.201891013966478e-05, |
| "loss": 7.5397, |
| "loss/crossentropy": 2.1127954378724096, |
| "loss/hidden": 3.278125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18592897318303586, |
| "step": 7180 |
| }, |
| { |
| "epoch": 0.719, |
| "grad_norm": 26.5, |
| "grad_norm_var": 3.56640625, |
| "learning_rate": 8.173544783059173e-05, |
| "loss": 7.5438, |
| "loss/crossentropy": 2.224238908290863, |
| "loss/hidden": 3.38984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2044967941939831, |
| "step": 7190 |
| }, |
| { |
| "epoch": 0.72, |
| "grad_norm": 28.375, |
| "grad_norm_var": 12.155989583333334, |
| "learning_rate": 8.14503363531613e-05, |
| "loss": 7.4945, |
| "loss/crossentropy": 2.2011847496032715, |
| "loss/hidden": 3.234765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18837413098663092, |
| "step": 7200 |
| }, |
| { |
| "epoch": 0.721, |
| "grad_norm": 30.25, |
| "grad_norm_var": 12.370572916666667, |
| "learning_rate": 8.116359329439238e-05, |
| "loss": 7.5481, |
| "loss/crossentropy": 2.2355149149894715, |
| "loss/hidden": 3.361328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19618814308196306, |
| "step": 7210 |
| }, |
| { |
| "epoch": 0.722, |
| "grad_norm": 29.25, |
| "grad_norm_var": 9.8837890625, |
| "learning_rate": 8.087523634194755e-05, |
| "loss": 7.691, |
| "loss/crossentropy": 2.2584028095006943, |
| "loss/hidden": 3.501953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.22446858156472443, |
| "step": 7220 |
| }, |
| { |
| "epoch": 0.723, |
| "grad_norm": 28.75, |
| "grad_norm_var": 1.8872395833333333, |
| "learning_rate": 8.058528328304188e-05, |
| "loss": 7.5763, |
| "loss/crossentropy": 2.16469197049737, |
| "loss/hidden": 3.320703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20104804309085011, |
| "step": 7230 |
| }, |
| { |
| "epoch": 0.724, |
| "grad_norm": 30.125, |
| "grad_norm_var": 5.76640625, |
| "learning_rate": 8.029375200334588e-05, |
| "loss": 7.6155, |
| "loss/crossentropy": 2.359184819459915, |
| "loss/hidden": 3.3703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21082259565591813, |
| "step": 7240 |
| }, |
| { |
| "epoch": 0.725, |
| "grad_norm": 26.375, |
| "grad_norm_var": 2.3379557291666666, |
| "learning_rate": 8.000066048588211e-05, |
| "loss": 7.5525, |
| "loss/crossentropy": 2.0703198462724686, |
| "loss/hidden": 3.421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19247690346091986, |
| "step": 7250 |
| }, |
| { |
| "epoch": 0.726, |
| "grad_norm": 30.0, |
| "grad_norm_var": 3.1747395833333334, |
| "learning_rate": 7.970602680991594e-05, |
| "loss": 7.666, |
| "loss/crossentropy": 2.1073908656835556, |
| "loss/hidden": 3.36875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20847180765122175, |
| "step": 7260 |
| }, |
| { |
| "epoch": 0.727, |
| "grad_norm": 37.25, |
| "grad_norm_var": 10.416666666666666, |
| "learning_rate": 7.940986914984034e-05, |
| "loss": 7.6802, |
| "loss/crossentropy": 2.1649453431367873, |
| "loss/hidden": 3.4703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20917554441839456, |
| "step": 7270 |
| }, |
| { |
| "epoch": 0.728, |
| "grad_norm": 29.25, |
| "grad_norm_var": 9.468684895833333, |
| "learning_rate": 7.911220577405484e-05, |
| "loss": 7.5714, |
| "loss/crossentropy": 2.327304023504257, |
| "loss/hidden": 3.34609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20701518896967172, |
| "step": 7280 |
| }, |
| { |
| "epoch": 0.729, |
| "grad_norm": 27.375, |
| "grad_norm_var": 4.26875, |
| "learning_rate": 7.881305504383858e-05, |
| "loss": 7.5349, |
| "loss/crossentropy": 2.3284592747688295, |
| "loss/hidden": 3.3546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20663589648902417, |
| "step": 7290 |
| }, |
| { |
| "epoch": 0.73, |
| "grad_norm": 26.75, |
| "grad_norm_var": 1.3541666666666667, |
| "learning_rate": 7.85124354122177e-05, |
| "loss": 7.5198, |
| "loss/crossentropy": 2.1882129050791264, |
| "loss/hidden": 3.255859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18851547054946421, |
| "step": 7300 |
| }, |
| { |
| "epoch": 0.731, |
| "grad_norm": 24.125, |
| "grad_norm_var": 6.3056640625, |
| "learning_rate": 7.821036542282714e-05, |
| "loss": 7.4597, |
| "loss/crossentropy": 2.2642627701163294, |
| "loss/hidden": 3.2890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2040542172268033, |
| "step": 7310 |
| }, |
| { |
| "epoch": 0.732, |
| "grad_norm": 27.25, |
| "grad_norm_var": 6.921875, |
| "learning_rate": 7.790686370876671e-05, |
| "loss": 7.6182, |
| "loss/crossentropy": 2.323049134016037, |
| "loss/hidden": 3.34375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19615541007369758, |
| "step": 7320 |
| }, |
| { |
| "epoch": 0.733, |
| "grad_norm": 26.375, |
| "grad_norm_var": 2.01640625, |
| "learning_rate": 7.760194899145176e-05, |
| "loss": 7.6029, |
| "loss/crossentropy": 2.192954847216606, |
| "loss/hidden": 3.304296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19462318085134028, |
| "step": 7330 |
| }, |
| { |
| "epoch": 0.734, |
| "grad_norm": 25.75, |
| "grad_norm_var": 3.754946529022024e+18, |
| "learning_rate": 7.729564007945835e-05, |
| "loss": 7.5679, |
| "loss/crossentropy": 2.2880126923322677, |
| "loss/hidden": 3.3265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20459149368107318, |
| "step": 7340 |
| }, |
| { |
| "epoch": 0.735, |
| "grad_norm": 27.375, |
| "grad_norm_var": 3.754946529441874e+18, |
| "learning_rate": 7.698795586736298e-05, |
| "loss": 7.4691, |
| "loss/crossentropy": 2.2379926055669785, |
| "loss/hidden": 3.358984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20208234172314404, |
| "step": 7350 |
| }, |
| { |
| "epoch": 0.736, |
| "grad_norm": 25.375, |
| "grad_norm_var": 1.1379557291666667, |
| "learning_rate": 7.667891533457719e-05, |
| "loss": 7.475, |
| "loss/crossentropy": 2.191740782558918, |
| "loss/hidden": 3.2578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1825101263821125, |
| "step": 7360 |
| }, |
| { |
| "epoch": 0.737, |
| "grad_norm": 28.5, |
| "grad_norm_var": 6.686458333333333, |
| "learning_rate": 7.636853754417676e-05, |
| "loss": 7.4922, |
| "loss/crossentropy": 2.215168985724449, |
| "loss/hidden": 3.278515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1965383216738701, |
| "step": 7370 |
| }, |
| { |
| "epoch": 0.738, |
| "grad_norm": 26.0, |
| "grad_norm_var": 2.3, |
| "learning_rate": 7.60568416417258e-05, |
| "loss": 7.3822, |
| "loss/crossentropy": 2.033752679824829, |
| "loss/hidden": 3.313671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18001100299879907, |
| "step": 7380 |
| }, |
| { |
| "epoch": 0.739, |
| "grad_norm": 27.0, |
| "grad_norm_var": 0.8629557291666666, |
| "learning_rate": 7.574384685409581e-05, |
| "loss": 7.5314, |
| "loss/crossentropy": 2.2958304405212404, |
| "loss/hidden": 3.351953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19766351543366908, |
| "step": 7390 |
| }, |
| { |
| "epoch": 0.74, |
| "grad_norm": 30.25, |
| "grad_norm_var": 22.751822916666665, |
| "learning_rate": 7.542957248827961e-05, |
| "loss": 7.5496, |
| "loss/crossentropy": 2.1121088288724423, |
| "loss/hidden": 3.32578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2019992781803012, |
| "step": 7400 |
| }, |
| { |
| "epoch": 0.741, |
| "grad_norm": 26.375, |
| "grad_norm_var": 21.520247395833334, |
| "learning_rate": 7.511403793020047e-05, |
| "loss": 7.5124, |
| "loss/crossentropy": 2.120064043998718, |
| "loss/hidden": 3.375390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21207549944519996, |
| "step": 7410 |
| }, |
| { |
| "epoch": 0.742, |
| "grad_norm": 27.875, |
| "grad_norm_var": 7.146809895833333, |
| "learning_rate": 7.479726264351618e-05, |
| "loss": 7.4119, |
| "loss/crossentropy": 2.250824949145317, |
| "loss/hidden": 3.257421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1866712011396885, |
| "step": 7420 |
| }, |
| { |
| "epoch": 0.743, |
| "grad_norm": 25.125, |
| "grad_norm_var": 3.443489583333333, |
| "learning_rate": 7.447926616841863e-05, |
| "loss": 7.4143, |
| "loss/crossentropy": 2.309866726398468, |
| "loss/hidden": 3.325, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1999421713873744, |
| "step": 7430 |
| }, |
| { |
| "epoch": 0.744, |
| "grad_norm": 28.75, |
| "grad_norm_var": 2.020247395833333, |
| "learning_rate": 7.416006812042828e-05, |
| "loss": 7.4387, |
| "loss/crossentropy": 2.2461145386099814, |
| "loss/hidden": 3.257421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18069201447069644, |
| "step": 7440 |
| }, |
| { |
| "epoch": 0.745, |
| "grad_norm": 26.125, |
| "grad_norm_var": 4.74140625, |
| "learning_rate": 7.383968818918426e-05, |
| "loss": 7.44, |
| "loss/crossentropy": 2.134336383640766, |
| "loss/hidden": 3.3109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1897894937545061, |
| "step": 7450 |
| }, |
| { |
| "epoch": 0.746, |
| "grad_norm": 27.875, |
| "grad_norm_var": 6.017643229166667, |
| "learning_rate": 7.35181461372299e-05, |
| "loss": 7.4134, |
| "loss/crossentropy": 2.169717122614384, |
| "loss/hidden": 3.359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19159721322357653, |
| "step": 7460 |
| }, |
| { |
| "epoch": 0.747, |
| "grad_norm": 27.75, |
| "grad_norm_var": 3.1775390625, |
| "learning_rate": 7.319546179879353e-05, |
| "loss": 7.5367, |
| "loss/crossentropy": 2.3293275028467177, |
| "loss/hidden": 3.377734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21772474870085717, |
| "step": 7470 |
| }, |
| { |
| "epoch": 0.748, |
| "grad_norm": 25.625, |
| "grad_norm_var": 4.45, |
| "learning_rate": 7.287165507856512e-05, |
| "loss": 7.4626, |
| "loss/crossentropy": 2.397631025314331, |
| "loss/hidden": 3.228125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19252193979918958, |
| "step": 7480 |
| }, |
| { |
| "epoch": 0.749, |
| "grad_norm": 27.0, |
| "grad_norm_var": 0.67265625, |
| "learning_rate": 7.254674595046847e-05, |
| "loss": 7.4285, |
| "loss/crossentropy": 2.141611284017563, |
| "loss/hidden": 3.334375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18560682162642478, |
| "step": 7490 |
| }, |
| { |
| "epoch": 0.75, |
| "grad_norm": 25.5, |
| "grad_norm_var": 4.889518229166667, |
| "learning_rate": 7.222075445642904e-05, |
| "loss": 7.3644, |
| "loss/crossentropy": 2.2735727220773696, |
| "loss/hidden": 3.3375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20446046069264412, |
| "step": 7500 |
| }, |
| { |
| "epoch": 0.751, |
| "grad_norm": 26.125, |
| "grad_norm_var": 2.8181640625, |
| "learning_rate": 7.189370070513775e-05, |
| "loss": 7.3532, |
| "loss/crossentropy": 2.1920345708727837, |
| "loss/hidden": 3.27109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18652982264757156, |
| "step": 7510 |
| }, |
| { |
| "epoch": 0.752, |
| "grad_norm": 24.5, |
| "grad_norm_var": 3.9556640625, |
| "learning_rate": 7.156560487081053e-05, |
| "loss": 7.4617, |
| "loss/crossentropy": 2.2119674131274225, |
| "loss/hidden": 3.33828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20092478170990943, |
| "step": 7520 |
| }, |
| { |
| "epoch": 0.753, |
| "grad_norm": 24.875, |
| "grad_norm_var": 4.035872395833334, |
| "learning_rate": 7.123648719194389e-05, |
| "loss": 7.4093, |
| "loss/crossentropy": 2.0915577203035354, |
| "loss/hidden": 3.195703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17126586921513082, |
| "step": 7530 |
| }, |
| { |
| "epoch": 0.754, |
| "grad_norm": 25.75, |
| "grad_norm_var": 2.4098307291666665, |
| "learning_rate": 7.090636797006658e-05, |
| "loss": 7.3297, |
| "loss/crossentropy": 2.198517936468124, |
| "loss/hidden": 3.45078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20143353156745433, |
| "step": 7540 |
| }, |
| { |
| "epoch": 0.755, |
| "grad_norm": 25.0, |
| "grad_norm_var": 1.9552083333333334, |
| "learning_rate": 7.057526756848719e-05, |
| "loss": 7.3531, |
| "loss/crossentropy": 2.08135302066803, |
| "loss/hidden": 3.2609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1978357734158635, |
| "step": 7550 |
| }, |
| { |
| "epoch": 0.756, |
| "grad_norm": 27.0, |
| "grad_norm_var": 1.8671223958333334, |
| "learning_rate": 7.024320641103812e-05, |
| "loss": 7.3367, |
| "loss/crossentropy": 2.217959429323673, |
| "loss/hidden": 3.180078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1959148071706295, |
| "step": 7560 |
| }, |
| { |
| "epoch": 0.757, |
| "grad_norm": 27.0, |
| "grad_norm_var": 1.3468098958333334, |
| "learning_rate": 6.991020498081569e-05, |
| "loss": 7.4209, |
| "loss/crossentropy": 2.301048618555069, |
| "loss/hidden": 3.284375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19627294316887856, |
| "step": 7570 |
| }, |
| { |
| "epoch": 0.758, |
| "grad_norm": 25.875, |
| "grad_norm_var": 1.1947916666666667, |
| "learning_rate": 6.957628381891673e-05, |
| "loss": 7.5366, |
| "loss/crossentropy": 2.0502139650285245, |
| "loss/hidden": 3.297265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18287195675075055, |
| "step": 7580 |
| }, |
| { |
| "epoch": 0.759, |
| "grad_norm": 26.875, |
| "grad_norm_var": 1.6488932291666667, |
| "learning_rate": 6.924146352317138e-05, |
| "loss": 7.3897, |
| "loss/crossentropy": 2.0968832850456236, |
| "loss/hidden": 3.34375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20618146620690822, |
| "step": 7590 |
| }, |
| { |
| "epoch": 0.76, |
| "grad_norm": 25.75, |
| "grad_norm_var": 1.8739583333333334, |
| "learning_rate": 6.890576474687263e-05, |
| "loss": 7.4515, |
| "loss/crossentropy": 2.093619105219841, |
| "loss/hidden": 3.216015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17545370263978838, |
| "step": 7600 |
| }, |
| { |
| "epoch": 0.761, |
| "grad_norm": 24.375, |
| "grad_norm_var": 3.160872395833333, |
| "learning_rate": 6.856920819750232e-05, |
| "loss": 7.3109, |
| "loss/crossentropy": 2.1301353871822357, |
| "loss/hidden": 3.273046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1940825032070279, |
| "step": 7610 |
| }, |
| { |
| "epoch": 0.762, |
| "grad_norm": 24.625, |
| "grad_norm_var": 2.948372395833333, |
| "learning_rate": 6.823181463545368e-05, |
| "loss": 7.4264, |
| "loss/crossentropy": 2.2378656387329103, |
| "loss/hidden": 3.28671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19179482720792293, |
| "step": 7620 |
| }, |
| { |
| "epoch": 0.763, |
| "grad_norm": 25.375, |
| "grad_norm_var": 1.6114583333333334, |
| "learning_rate": 6.789360487275092e-05, |
| "loss": 7.389, |
| "loss/crossentropy": 2.1321434706449507, |
| "loss/hidden": 3.32421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.21226093731820583, |
| "step": 7630 |
| }, |
| { |
| "epoch": 0.764, |
| "grad_norm": 24.25, |
| "grad_norm_var": 3.9145833333333333, |
| "learning_rate": 6.755459977176533e-05, |
| "loss": 7.3101, |
| "loss/crossentropy": 2.1095103308558465, |
| "loss/hidden": 3.282421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1865893481299281, |
| "step": 7640 |
| }, |
| { |
| "epoch": 0.765, |
| "grad_norm": 24.5, |
| "grad_norm_var": 1.1551432291666666, |
| "learning_rate": 6.721482024392835e-05, |
| "loss": 7.3427, |
| "loss/crossentropy": 2.2230965688824655, |
| "loss/hidden": 3.233203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1860642520710826, |
| "step": 7650 |
| }, |
| { |
| "epoch": 0.766, |
| "grad_norm": 37.75, |
| "grad_norm_var": 13.4837890625, |
| "learning_rate": 6.687428724844179e-05, |
| "loss": 7.3843, |
| "loss/crossentropy": 2.2692631423473357, |
| "loss/hidden": 3.254296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18289674110710621, |
| "step": 7660 |
| }, |
| { |
| "epoch": 0.767, |
| "grad_norm": 29.125, |
| "grad_norm_var": 2.7866726146420767e+18, |
| "learning_rate": 6.653302179098485e-05, |
| "loss": 7.3684, |
| "loss/crossentropy": 2.073717230185866, |
| "loss/hidden": 3.326953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18691302686929703, |
| "step": 7670 |
| }, |
| { |
| "epoch": 0.768, |
| "grad_norm": 25.75, |
| "grad_norm_var": 2.786672615963632e+18, |
| "learning_rate": 6.619104492241848e-05, |
| "loss": 7.2984, |
| "loss/crossentropy": 2.2619937866926194, |
| "loss/hidden": 3.318359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20932555850595236, |
| "step": 7680 |
| }, |
| { |
| "epoch": 0.769, |
| "grad_norm": 24.625, |
| "grad_norm_var": 12.002083333333333, |
| "learning_rate": 6.584837773748674e-05, |
| "loss": 7.2788, |
| "loss/crossentropy": 2.1440966367721557, |
| "loss/hidden": 3.273046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19258161056786777, |
| "step": 7690 |
| }, |
| { |
| "epoch": 0.77, |
| "grad_norm": 25.25, |
| "grad_norm_var": 9.005143229166666, |
| "learning_rate": 6.550504137351576e-05, |
| "loss": 7.3098, |
| "loss/crossentropy": 2.1658431753516196, |
| "loss/hidden": 3.184375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18627576418220998, |
| "step": 7700 |
| }, |
| { |
| "epoch": 0.771, |
| "grad_norm": 24.375, |
| "grad_norm_var": 1.33125, |
| "learning_rate": 6.516105700910967e-05, |
| "loss": 7.2005, |
| "loss/crossentropy": 2.1858869284391402, |
| "loss/hidden": 3.18359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18092084676027298, |
| "step": 7710 |
| }, |
| { |
| "epoch": 0.772, |
| "grad_norm": 26.125, |
| "grad_norm_var": 1.8452473958333333, |
| "learning_rate": 6.481644586284442e-05, |
| "loss": 7.2082, |
| "loss/crossentropy": 2.295649342238903, |
| "loss/hidden": 3.193359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1811120670288801, |
| "step": 7720 |
| }, |
| { |
| "epoch": 0.773, |
| "grad_norm": 22.625, |
| "grad_norm_var": 5.278580729166666, |
| "learning_rate": 6.447122919195875e-05, |
| "loss": 7.2226, |
| "loss/crossentropy": 2.1884284943342207, |
| "loss/hidden": 3.201953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17928373999893665, |
| "step": 7730 |
| }, |
| { |
| "epoch": 0.774, |
| "grad_norm": 27.0, |
| "grad_norm_var": 4.102018229166666, |
| "learning_rate": 6.412542829104307e-05, |
| "loss": 7.3724, |
| "loss/crossentropy": 2.3521720811724665, |
| "loss/hidden": 3.23203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1927757915109396, |
| "step": 7740 |
| }, |
| { |
| "epoch": 0.775, |
| "grad_norm": 23.5, |
| "grad_norm_var": 4.912239583333333, |
| "learning_rate": 6.377906449072578e-05, |
| "loss": 7.2959, |
| "loss/crossentropy": 2.1575791001319886, |
| "loss/hidden": 3.27734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19961551688611506, |
| "step": 7750 |
| }, |
| { |
| "epoch": 0.776, |
| "grad_norm": 24.375, |
| "grad_norm_var": 0.7666015625, |
| "learning_rate": 6.343215915635762e-05, |
| "loss": 7.1249, |
| "loss/crossentropy": 2.1519226878881454, |
| "loss/hidden": 3.190625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17577633038163185, |
| "step": 7760 |
| }, |
| { |
| "epoch": 0.777, |
| "grad_norm": 24.375, |
| "grad_norm_var": 8.60390625, |
| "learning_rate": 6.308473368669367e-05, |
| "loss": 7.2177, |
| "loss/crossentropy": 2.2416551634669304, |
| "loss/hidden": 3.16484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1746464787982404, |
| "step": 7770 |
| }, |
| { |
| "epoch": 0.778, |
| "grad_norm": 24.5, |
| "grad_norm_var": 8.609830729166667, |
| "learning_rate": 6.273680951257342e-05, |
| "loss": 7.3032, |
| "loss/crossentropy": 2.145657110214233, |
| "loss/hidden": 3.16953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18332088328897952, |
| "step": 7780 |
| }, |
| { |
| "epoch": 0.779, |
| "grad_norm": 24.5, |
| "grad_norm_var": 3.525, |
| "learning_rate": 6.238840809559884e-05, |
| "loss": 7.3272, |
| "loss/crossentropy": 2.1870901376008987, |
| "loss/hidden": 3.332421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.20739195179194211, |
| "step": 7790 |
| }, |
| { |
| "epoch": 0.78, |
| "grad_norm": 25.75, |
| "grad_norm_var": 3.0639973958333333, |
| "learning_rate": 6.203955092681039e-05, |
| "loss": 7.2885, |
| "loss/crossentropy": 2.2870864123106003, |
| "loss/hidden": 3.258984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19918184783309698, |
| "step": 7800 |
| }, |
| { |
| "epoch": 0.781, |
| "grad_norm": 24.25, |
| "grad_norm_var": 3.0737770908802703e+18, |
| "learning_rate": 6.169025952536154e-05, |
| "loss": 7.2785, |
| "loss/crossentropy": 2.115164430439472, |
| "loss/hidden": 3.187109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19251083210110664, |
| "step": 7810 |
| }, |
| { |
| "epoch": 0.782, |
| "grad_norm": 24.25, |
| "grad_norm_var": 2.634375, |
| "learning_rate": 6.134055543719121e-05, |
| "loss": 7.1323, |
| "loss/crossentropy": 2.1837784573435783, |
| "loss/hidden": 3.14765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1888807687908411, |
| "step": 7820 |
| }, |
| { |
| "epoch": 0.783, |
| "grad_norm": 23.5, |
| "grad_norm_var": 1.3385416666666667, |
| "learning_rate": 6.0990460233694854e-05, |
| "loss": 7.1168, |
| "loss/crossentropy": 2.2223884403705596, |
| "loss/hidden": 3.20234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18356790579855442, |
| "step": 7830 |
| }, |
| { |
| "epoch": 0.784, |
| "grad_norm": 24.25, |
| "grad_norm_var": 0.5754557291666667, |
| "learning_rate": 6.06399955103937e-05, |
| "loss": 7.0934, |
| "loss/crossentropy": 2.0307491227984427, |
| "loss/hidden": 3.166015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1779999515041709, |
| "step": 7840 |
| }, |
| { |
| "epoch": 0.785, |
| "grad_norm": 23.625, |
| "grad_norm_var": 31.23515625, |
| "learning_rate": 6.0289182885602704e-05, |
| "loss": 7.2069, |
| "loss/crossentropy": 2.175394120812416, |
| "loss/hidden": 3.194921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18147795423865318, |
| "step": 7850 |
| }, |
| { |
| "epoch": 0.786, |
| "grad_norm": 26.625, |
| "grad_norm_var": 31.609309895833334, |
| "learning_rate": 5.993804399909704e-05, |
| "loss": 7.1219, |
| "loss/crossentropy": 2.184116542339325, |
| "loss/hidden": 3.175, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18110901936888696, |
| "step": 7860 |
| }, |
| { |
| "epoch": 0.787, |
| "grad_norm": 25.0, |
| "grad_norm_var": 1.3416666666666666, |
| "learning_rate": 5.9586600510777255e-05, |
| "loss": 7.2392, |
| "loss/crossentropy": 2.1728118672966956, |
| "loss/hidden": 3.187890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17548323906958102, |
| "step": 7870 |
| }, |
| { |
| "epoch": 0.788, |
| "grad_norm": 23.625, |
| "grad_norm_var": 0.6979166666666666, |
| "learning_rate": 5.923487409933316e-05, |
| "loss": 7.2005, |
| "loss/crossentropy": 2.235177829861641, |
| "loss/hidden": 3.097265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17446751594543458, |
| "step": 7880 |
| }, |
| { |
| "epoch": 0.789, |
| "grad_norm": 24.625, |
| "grad_norm_var": 2.2514973958333333, |
| "learning_rate": 5.888288646090656e-05, |
| "loss": 7.1577, |
| "loss/crossentropy": 1.9824412673711778, |
| "loss/hidden": 3.108203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16306445542722942, |
| "step": 7890 |
| }, |
| { |
| "epoch": 0.79, |
| "grad_norm": 23.25, |
| "grad_norm_var": 1.6604166666666667, |
| "learning_rate": 5.8530659307753036e-05, |
| "loss": 7.1311, |
| "loss/crossentropy": 2.2140675693750382, |
| "loss/hidden": 3.209765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.19630825575441121, |
| "step": 7900 |
| }, |
| { |
| "epoch": 0.791, |
| "grad_norm": 22.5, |
| "grad_norm_var": 1.6166015625, |
| "learning_rate": 5.817821436690251e-05, |
| "loss": 7.0413, |
| "loss/crossentropy": 2.13326867967844, |
| "loss/hidden": 3.240234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17188256345689296, |
| "step": 7910 |
| }, |
| { |
| "epoch": 0.792, |
| "grad_norm": 23.25, |
| "grad_norm_var": 0.8625, |
| "learning_rate": 5.782557337881911e-05, |
| "loss": 7.0504, |
| "loss/crossentropy": 2.22673671990633, |
| "loss/hidden": 3.1046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18034335430711507, |
| "step": 7920 |
| }, |
| { |
| "epoch": 0.793, |
| "grad_norm": 23.25, |
| "grad_norm_var": 0.9280598958333334, |
| "learning_rate": 5.747275809606006e-05, |
| "loss": 7.0115, |
| "loss/crossentropy": 2.233048897981644, |
| "loss/hidden": 3.1375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17683454230427742, |
| "step": 7930 |
| }, |
| { |
| "epoch": 0.794, |
| "grad_norm": 22.625, |
| "grad_norm_var": 0.46139322916666664, |
| "learning_rate": 5.7119790281933914e-05, |
| "loss": 7.0223, |
| "loss/crossentropy": 2.1612079739570618, |
| "loss/hidden": 3.196875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18148380517959595, |
| "step": 7940 |
| }, |
| { |
| "epoch": 0.795, |
| "grad_norm": 23.875, |
| "grad_norm_var": 1.7061848958333334, |
| "learning_rate": 5.6766691709158096e-05, |
| "loss": 7.0717, |
| "loss/crossentropy": 2.174719399213791, |
| "loss/hidden": 3.202734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17878109030425549, |
| "step": 7950 |
| }, |
| { |
| "epoch": 0.796, |
| "grad_norm": 23.625, |
| "grad_norm_var": 2.5629557291666667, |
| "learning_rate": 5.641348415851577e-05, |
| "loss": 7.0505, |
| "loss/crossentropy": 2.113456728309393, |
| "loss/hidden": 3.148046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17140786703675986, |
| "step": 7960 |
| }, |
| { |
| "epoch": 0.797, |
| "grad_norm": 24.375, |
| "grad_norm_var": 2.1333333333333333, |
| "learning_rate": 5.6060189417512466e-05, |
| "loss": 7.1782, |
| "loss/crossentropy": 2.111274632811546, |
| "loss/hidden": 3.24453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18207379188388587, |
| "step": 7970 |
| }, |
| { |
| "epoch": 0.798, |
| "grad_norm": 23.5, |
| "grad_norm_var": 1.2254557291666666, |
| "learning_rate": 5.570682927903194e-05, |
| "loss": 7.0835, |
| "loss/crossentropy": 2.209978663921356, |
| "loss/hidden": 3.193359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18822428286075593, |
| "step": 7980 |
| }, |
| { |
| "epoch": 0.799, |
| "grad_norm": 23.25, |
| "grad_norm_var": 0.6559895833333333, |
| "learning_rate": 5.535342553999201e-05, |
| "loss": 7.0226, |
| "loss/crossentropy": 2.2532318040728567, |
| "loss/hidden": 3.062890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17306482810527085, |
| "step": 7990 |
| }, |
| { |
| "epoch": 0.8, |
| "grad_norm": 22.0, |
| "grad_norm_var": 1.3056640625, |
| "learning_rate": 5.500000000000001e-05, |
| "loss": 7.0514, |
| "loss/crossentropy": 2.112633790075779, |
| "loss/hidden": 3.14453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17193403597921134, |
| "step": 8000 |
| }, |
| { |
| "epoch": 0.801, |
| "grad_norm": 23.875, |
| "grad_norm_var": 0.9291666666666667, |
| "learning_rate": 5.464657446000802e-05, |
| "loss": 7.1599, |
| "loss/crossentropy": 2.2727265626192095, |
| "loss/hidden": 3.120703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1767945010215044, |
| "step": 8010 |
| }, |
| { |
| "epoch": 0.802, |
| "grad_norm": 21.75, |
| "grad_norm_var": 1.23515625, |
| "learning_rate": 5.429317072096808e-05, |
| "loss": 6.9704, |
| "loss/crossentropy": 2.2333207607269285, |
| "loss/hidden": 3.048828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16404416598379612, |
| "step": 8020 |
| }, |
| { |
| "epoch": 0.803, |
| "grad_norm": 24.125, |
| "grad_norm_var": 1.3072265625, |
| "learning_rate": 5.393981058248755e-05, |
| "loss": 7.0143, |
| "loss/crossentropy": 2.0972195714712143, |
| "loss/hidden": 3.14921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1729634340852499, |
| "step": 8030 |
| }, |
| { |
| "epoch": 0.804, |
| "grad_norm": 22.25, |
| "grad_norm_var": 1.4129557291666666, |
| "learning_rate": 5.358651584148423e-05, |
| "loss": 7.0155, |
| "loss/crossentropy": 1.9586305670440196, |
| "loss/hidden": 3.026171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15513296276330948, |
| "step": 8040 |
| }, |
| { |
| "epoch": 0.805, |
| "grad_norm": 23.125, |
| "grad_norm_var": 0.7759765625, |
| "learning_rate": 5.3233308290841935e-05, |
| "loss": 7.0464, |
| "loss/crossentropy": 2.170274239778519, |
| "loss/hidden": 3.231640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17621967550367118, |
| "step": 8050 |
| }, |
| { |
| "epoch": 0.806, |
| "grad_norm": 22.875, |
| "grad_norm_var": 1.0160807291666667, |
| "learning_rate": 5.288020971806609e-05, |
| "loss": 7.0331, |
| "loss/crossentropy": 2.2559945076704024, |
| "loss/hidden": 3.09375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17008322682231664, |
| "step": 8060 |
| }, |
| { |
| "epoch": 0.807, |
| "grad_norm": 22.625, |
| "grad_norm_var": 15.751041666666667, |
| "learning_rate": 5.252724190393995e-05, |
| "loss": 7.0494, |
| "loss/crossentropy": 2.177362951636314, |
| "loss/hidden": 3.099609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17315508853644132, |
| "step": 8070 |
| }, |
| { |
| "epoch": 0.808, |
| "grad_norm": 21.75, |
| "grad_norm_var": 1.6014973958333334, |
| "learning_rate": 5.2174426621180906e-05, |
| "loss": 6.9141, |
| "loss/crossentropy": 2.2173873841762544, |
| "loss/hidden": 3.11171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17051471378654243, |
| "step": 8080 |
| }, |
| { |
| "epoch": 0.809, |
| "grad_norm": 22.375, |
| "grad_norm_var": 8.440559895833333, |
| "learning_rate": 5.182178563309751e-05, |
| "loss": 7.0752, |
| "loss/crossentropy": 2.129284977912903, |
| "loss/hidden": 3.13203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16524926889687777, |
| "step": 8090 |
| }, |
| { |
| "epoch": 0.81, |
| "grad_norm": 22.5, |
| "grad_norm_var": 9.026822916666667, |
| "learning_rate": 5.1469340692246995e-05, |
| "loss": 6.8317, |
| "loss/crossentropy": 2.1604677557945253, |
| "loss/hidden": 3.07734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16097149150446058, |
| "step": 8100 |
| }, |
| { |
| "epoch": 0.811, |
| "grad_norm": 22.375, |
| "grad_norm_var": 1.4184895833333333, |
| "learning_rate": 5.1117113539093456e-05, |
| "loss": 7.0113, |
| "loss/crossentropy": 2.253964813053608, |
| "loss/hidden": 3.101171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17060668766498566, |
| "step": 8110 |
| }, |
| { |
| "epoch": 0.812, |
| "grad_norm": 23.125, |
| "grad_norm_var": 4728.34765625, |
| "learning_rate": 5.076512590066685e-05, |
| "loss": 7.0753, |
| "loss/crossentropy": 2.332234174013138, |
| "loss/hidden": 2.9859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15933743081986904, |
| "step": 8120 |
| }, |
| { |
| "epoch": 0.813, |
| "grad_norm": 23.125, |
| "grad_norm_var": 4740.253125, |
| "learning_rate": 5.0413399489222755e-05, |
| "loss": 6.9925, |
| "loss/crossentropy": 2.2431288778781893, |
| "loss/hidden": 3.177734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.18101395089179279, |
| "step": 8130 |
| }, |
| { |
| "epoch": 0.814, |
| "grad_norm": 21.75, |
| "grad_norm_var": 0.7171223958333334, |
| "learning_rate": 5.006195600090297e-05, |
| "loss": 6.8942, |
| "loss/crossentropy": 2.156078750640154, |
| "loss/hidden": 3.068359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1705356423743069, |
| "step": 8140 |
| }, |
| { |
| "epoch": 0.815, |
| "grad_norm": 22.5, |
| "grad_norm_var": 14.7931640625, |
| "learning_rate": 4.9710817114397314e-05, |
| "loss": 6.9409, |
| "loss/crossentropy": 2.2799797251820566, |
| "loss/hidden": 3.237890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.2035599894821644, |
| "step": 8150 |
| }, |
| { |
| "epoch": 0.816, |
| "grad_norm": 22.0, |
| "grad_norm_var": 16.298372395833333, |
| "learning_rate": 4.936000448960631e-05, |
| "loss": 6.9298, |
| "loss/crossentropy": 2.193594951927662, |
| "loss/hidden": 3.062890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16013095155358315, |
| "step": 8160 |
| }, |
| { |
| "epoch": 0.817, |
| "grad_norm": 22.0, |
| "grad_norm_var": 2.030989583333333, |
| "learning_rate": 4.9009539766305157e-05, |
| "loss": 6.8087, |
| "loss/crossentropy": 2.0724834606051443, |
| "loss/hidden": 3.053125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16694455780088902, |
| "step": 8170 |
| }, |
| { |
| "epoch": 0.818, |
| "grad_norm": 22.0, |
| "grad_norm_var": 3.192559534480346e+18, |
| "learning_rate": 4.865944456280879e-05, |
| "loss": 7.0056, |
| "loss/crossentropy": 1.9885679975152015, |
| "loss/hidden": 3.039453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15490864738821983, |
| "step": 8180 |
| }, |
| { |
| "epoch": 0.819, |
| "grad_norm": 22.0, |
| "grad_norm_var": 0.7572916666666667, |
| "learning_rate": 4.830974047463847e-05, |
| "loss": 6.951, |
| "loss/crossentropy": 2.2198346734046934, |
| "loss/hidden": 3.1046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1715863322839141, |
| "step": 8190 |
| }, |
| { |
| "epoch": 0.82, |
| "grad_norm": 22.375, |
| "grad_norm_var": 70.35774739583333, |
| "learning_rate": 4.7960449073189606e-05, |
| "loss": 6.9039, |
| "loss/crossentropy": 2.2345028445124626, |
| "loss/hidden": 3.020703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16092772465199232, |
| "step": 8200 |
| }, |
| { |
| "epoch": 0.821, |
| "grad_norm": 22.25, |
| "grad_norm_var": 0.9499348958333333, |
| "learning_rate": 4.761159190440117e-05, |
| "loss": 6.8918, |
| "loss/crossentropy": 2.25988983809948, |
| "loss/hidden": 2.99140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.162513430416584, |
| "step": 8210 |
| }, |
| { |
| "epoch": 0.822, |
| "grad_norm": 20.375, |
| "grad_norm_var": 2.3824041854824177e+18, |
| "learning_rate": 4.7263190487426564e-05, |
| "loss": 6.9202, |
| "loss/crossentropy": 2.2631596639752387, |
| "loss/hidden": 3.311328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17134508192539216, |
| "step": 8220 |
| }, |
| { |
| "epoch": 0.823, |
| "grad_norm": 21.75, |
| "grad_norm_var": 9.499739583333334, |
| "learning_rate": 4.691526631330634e-05, |
| "loss": 6.8251, |
| "loss/crossentropy": 2.1176006376743315, |
| "loss/hidden": 2.975390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15703603606671096, |
| "step": 8230 |
| }, |
| { |
| "epoch": 0.824, |
| "grad_norm": 22.25, |
| "grad_norm_var": 8.7369140625, |
| "learning_rate": 4.6567840843642384e-05, |
| "loss": 6.8875, |
| "loss/crossentropy": 2.1988213270902635, |
| "loss/hidden": 3.0828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15953818447887896, |
| "step": 8240 |
| }, |
| { |
| "epoch": 0.825, |
| "grad_norm": 21.625, |
| "grad_norm_var": 0.8999348958333333, |
| "learning_rate": 4.6220935509274235e-05, |
| "loss": 6.954, |
| "loss/crossentropy": 2.122755715250969, |
| "loss/hidden": 3.181640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1846215335652232, |
| "step": 8250 |
| }, |
| { |
| "epoch": 0.826, |
| "grad_norm": 21.375, |
| "grad_norm_var": 0.9421223958333333, |
| "learning_rate": 4.587457170895696e-05, |
| "loss": 6.7388, |
| "loss/crossentropy": 2.1177376963198187, |
| "loss/hidden": 3.015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16393411150202156, |
| "step": 8260 |
| }, |
| { |
| "epoch": 0.827, |
| "grad_norm": 22.25, |
| "grad_norm_var": 2.7309405742616806e+18, |
| "learning_rate": 4.552877080804126e-05, |
| "loss": 6.8162, |
| "loss/crossentropy": 2.2066329643130302, |
| "loss/hidden": 2.94140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15212294571101664, |
| "step": 8270 |
| }, |
| { |
| "epoch": 0.828, |
| "grad_norm": 21.5, |
| "grad_norm_var": 12.398958333333333, |
| "learning_rate": 4.5183554137155606e-05, |
| "loss": 6.9173, |
| "loss/crossentropy": 2.264900441467762, |
| "loss/hidden": 3.0015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16246109874919057, |
| "step": 8280 |
| }, |
| { |
| "epoch": 0.829, |
| "grad_norm": 21.625, |
| "grad_norm_var": 12.623893229166667, |
| "learning_rate": 4.483894299089034e-05, |
| "loss": 6.9313, |
| "loss/crossentropy": 2.2778148487210275, |
| "loss/hidden": 3.070703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17395032718777656, |
| "step": 8290 |
| }, |
| { |
| "epoch": 0.83, |
| "grad_norm": 35.25, |
| "grad_norm_var": 24.608333333333334, |
| "learning_rate": 4.4494958626484276e-05, |
| "loss": 6.6172, |
| "loss/crossentropy": 1.8980232715606689, |
| "loss/hidden": 3.024609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14263095702044665, |
| "step": 8300 |
| }, |
| { |
| "epoch": 0.831, |
| "grad_norm": 21.5, |
| "grad_norm_var": 13.6478515625, |
| "learning_rate": 4.415162226251327e-05, |
| "loss": 6.778, |
| "loss/crossentropy": 2.1019492864608766, |
| "loss/hidden": 3.0203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16046333201229573, |
| "step": 8310 |
| }, |
| { |
| "epoch": 0.832, |
| "grad_norm": 33.75, |
| "grad_norm_var": 20.048958333333335, |
| "learning_rate": 4.380895507758155e-05, |
| "loss": 6.8104, |
| "loss/crossentropy": 2.197945240139961, |
| "loss/hidden": 3.125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17097027003765106, |
| "step": 8320 |
| }, |
| { |
| "epoch": 0.833, |
| "grad_norm": 20.375, |
| "grad_norm_var": 10.97265625, |
| "learning_rate": 4.346697820901515e-05, |
| "loss": 6.8388, |
| "loss/crossentropy": 2.1463077016174794, |
| "loss/hidden": 3.020703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14912782991304993, |
| "step": 8330 |
| }, |
| { |
| "epoch": 0.834, |
| "grad_norm": 19.125, |
| "grad_norm_var": 14.505143229166666, |
| "learning_rate": 4.312571275155823e-05, |
| "loss": 6.7567, |
| "loss/crossentropy": 2.1706424072384833, |
| "loss/hidden": 2.925390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15102706514298916, |
| "step": 8340 |
| }, |
| { |
| "epoch": 0.835, |
| "grad_norm": 22.5, |
| "grad_norm_var": 10.55, |
| "learning_rate": 4.278517975607167e-05, |
| "loss": 6.6799, |
| "loss/crossentropy": 2.2084620505571366, |
| "loss/hidden": 2.93203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14671239536255598, |
| "step": 8350 |
| }, |
| { |
| "epoch": 0.836, |
| "grad_norm": 20.25, |
| "grad_norm_var": 9.618684895833333, |
| "learning_rate": 4.2445400228234686e-05, |
| "loss": 6.8098, |
| "loss/crossentropy": 2.24121166318655, |
| "loss/hidden": 3.064453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17096446882933378, |
| "step": 8360 |
| }, |
| { |
| "epoch": 0.837, |
| "grad_norm": 37.75, |
| "grad_norm_var": 21.941080729166668, |
| "learning_rate": 4.2106395127249076e-05, |
| "loss": 6.778, |
| "loss/crossentropy": 2.196937990188599, |
| "loss/hidden": 2.94296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16012835130095482, |
| "step": 8370 |
| }, |
| { |
| "epoch": 0.838, |
| "grad_norm": 21.0, |
| "grad_norm_var": 22.3791015625, |
| "learning_rate": 4.176818536454633e-05, |
| "loss": 6.7274, |
| "loss/crossentropy": 2.2157079711556436, |
| "loss/hidden": 3.119921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.179295320995152, |
| "step": 8380 |
| }, |
| { |
| "epoch": 0.839, |
| "grad_norm": 21.125, |
| "grad_norm_var": 11.954622395833333, |
| "learning_rate": 4.14307918024977e-05, |
| "loss": 6.7335, |
| "loss/crossentropy": 1.9891890831291676, |
| "loss/hidden": 3.04453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15178165752440692, |
| "step": 8390 |
| }, |
| { |
| "epoch": 0.84, |
| "grad_norm": 20.75, |
| "grad_norm_var": 12.85390625, |
| "learning_rate": 4.109423525312738e-05, |
| "loss": 6.8472, |
| "loss/crossentropy": 2.2481492303311823, |
| "loss/hidden": 3.0734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1720767416059971, |
| "step": 8400 |
| }, |
| { |
| "epoch": 0.841, |
| "grad_norm": 20.125, |
| "grad_norm_var": 12.489322916666667, |
| "learning_rate": 4.0758536476828624e-05, |
| "loss": 6.753, |
| "loss/crossentropy": 2.162366569042206, |
| "loss/hidden": 3.02421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16984781231731177, |
| "step": 8410 |
| }, |
| { |
| "epoch": 0.842, |
| "grad_norm": 20.375, |
| "grad_norm_var": 13.0087890625, |
| "learning_rate": 4.042371618108329e-05, |
| "loss": 6.6159, |
| "loss/crossentropy": 2.0167182251811027, |
| "loss/hidden": 2.815234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.13584452606737613, |
| "step": 8420 |
| }, |
| { |
| "epoch": 0.843, |
| "grad_norm": 18.875, |
| "grad_norm_var": 10.72890625, |
| "learning_rate": 4.0089795019184303e-05, |
| "loss": 6.7834, |
| "loss/crossentropy": 2.1324578002095222, |
| "loss/hidden": 3.08828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1533666567876935, |
| "step": 8430 |
| }, |
| { |
| "epoch": 0.844, |
| "grad_norm": 19.25, |
| "grad_norm_var": 8.525, |
| "learning_rate": 3.9756793588961896e-05, |
| "loss": 6.7359, |
| "loss/crossentropy": 2.101317623257637, |
| "loss/hidden": 2.938671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15315671488642693, |
| "step": 8440 |
| }, |
| { |
| "epoch": 0.845, |
| "grad_norm": 21.625, |
| "grad_norm_var": 6.635416666666667, |
| "learning_rate": 3.942473243151281e-05, |
| "loss": 6.7393, |
| "loss/crossentropy": 2.094627208262682, |
| "loss/hidden": 3.019921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15584880905225873, |
| "step": 8450 |
| }, |
| { |
| "epoch": 0.846, |
| "grad_norm": 20.125, |
| "grad_norm_var": 4.204622395833334, |
| "learning_rate": 3.9093632029933435e-05, |
| "loss": 6.721, |
| "loss/crossentropy": 2.2385535068809985, |
| "loss/hidden": 3.049609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1660825703293085, |
| "step": 8460 |
| }, |
| { |
| "epoch": 0.847, |
| "grad_norm": 19.0, |
| "grad_norm_var": 10.87890625, |
| "learning_rate": 3.8763512808056125e-05, |
| "loss": 6.5906, |
| "loss/crossentropy": 2.1519340083003042, |
| "loss/hidden": 2.949609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15027154460549355, |
| "step": 8470 |
| }, |
| { |
| "epoch": 0.848, |
| "grad_norm": 29.625, |
| "grad_norm_var": 13.39140625, |
| "learning_rate": 3.843439512918949e-05, |
| "loss": 6.7149, |
| "loss/crossentropy": 2.233719590306282, |
| "loss/hidden": 3.010546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1669364469125867, |
| "step": 8480 |
| }, |
| { |
| "epoch": 0.849, |
| "grad_norm": 20.5, |
| "grad_norm_var": 8.970768229166667, |
| "learning_rate": 3.810629929486226e-05, |
| "loss": 6.7086, |
| "loss/crossentropy": 2.226981985569, |
| "loss/hidden": 3.01484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16180209238082172, |
| "step": 8490 |
| }, |
| { |
| "epoch": 0.85, |
| "grad_norm": 19.25, |
| "grad_norm_var": 8.365559895833334, |
| "learning_rate": 3.777924554357096e-05, |
| "loss": 6.6027, |
| "loss/crossentropy": 2.080954398959875, |
| "loss/hidden": 2.93515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1519384733401239, |
| "step": 8500 |
| }, |
| { |
| "epoch": 0.851, |
| "grad_norm": 27.625, |
| "grad_norm_var": 10.964322916666667, |
| "learning_rate": 3.7453254049531536e-05, |
| "loss": 6.816, |
| "loss/crossentropy": 2.1905113458633423, |
| "loss/hidden": 3.0546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16897472254931928, |
| "step": 8510 |
| }, |
| { |
| "epoch": 0.852, |
| "grad_norm": 26.625, |
| "grad_norm_var": 6.923372395833334, |
| "learning_rate": 3.712834492143488e-05, |
| "loss": 6.7829, |
| "loss/crossentropy": 2.037618358433247, |
| "loss/hidden": 2.925, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14524593306705355, |
| "step": 8520 |
| }, |
| { |
| "epoch": 0.853, |
| "grad_norm": 28.875, |
| "grad_norm_var": 8.1009765625, |
| "learning_rate": 3.6804538201206483e-05, |
| "loss": 6.7153, |
| "loss/crossentropy": 2.2185431465506555, |
| "loss/hidden": 2.95703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16105205966159702, |
| "step": 8530 |
| }, |
| { |
| "epoch": 0.854, |
| "grad_norm": 19.375, |
| "grad_norm_var": 9.983072916666666, |
| "learning_rate": 3.648185386277011e-05, |
| "loss": 6.6926, |
| "loss/crossentropy": 1.935778360068798, |
| "loss/hidden": 3.0765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15904303397983313, |
| "step": 8540 |
| }, |
| { |
| "epoch": 0.855, |
| "grad_norm": 19.625, |
| "grad_norm_var": 7.237434895833333, |
| "learning_rate": 3.616031181081575e-05, |
| "loss": 6.7702, |
| "loss/crossentropy": 2.147594654560089, |
| "loss/hidden": 3.08125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17960532996803522, |
| "step": 8550 |
| }, |
| { |
| "epoch": 0.856, |
| "grad_norm": 20.25, |
| "grad_norm_var": 17.895833333333332, |
| "learning_rate": 3.583993187957173e-05, |
| "loss": 6.6853, |
| "loss/crossentropy": 2.1908980406820775, |
| "loss/hidden": 2.966796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1526848678011447, |
| "step": 8560 |
| }, |
| { |
| "epoch": 0.857, |
| "grad_norm": 19.875, |
| "grad_norm_var": 20.8337890625, |
| "learning_rate": 3.5520733831581396e-05, |
| "loss": 6.6626, |
| "loss/crossentropy": 2.1303799264132977, |
| "loss/hidden": 3.020703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15846395073458552, |
| "step": 8570 |
| }, |
| { |
| "epoch": 0.858, |
| "grad_norm": 28.75, |
| "grad_norm_var": 14.406705729166667, |
| "learning_rate": 3.520273735648382e-05, |
| "loss": 6.7556, |
| "loss/crossentropy": 2.3622685760259627, |
| "loss/hidden": 2.95625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1620245823636651, |
| "step": 8580 |
| }, |
| { |
| "epoch": 0.859, |
| "grad_norm": 19.625, |
| "grad_norm_var": 8.196809895833333, |
| "learning_rate": 3.4885962069799554e-05, |
| "loss": 6.7115, |
| "loss/crossentropy": 2.1019712671637536, |
| "loss/hidden": 2.92421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14273924147710204, |
| "step": 8590 |
| }, |
| { |
| "epoch": 0.86, |
| "grad_norm": 18.5, |
| "grad_norm_var": 6.0806640625, |
| "learning_rate": 3.45704275117204e-05, |
| "loss": 6.6569, |
| "loss/crossentropy": 2.1646031074225904, |
| "loss/hidden": 3.039453125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1613057630136609, |
| "step": 8600 |
| }, |
| { |
| "epoch": 0.861, |
| "grad_norm": 17.625, |
| "grad_norm_var": 6.801822916666667, |
| "learning_rate": 3.42561531459042e-05, |
| "loss": 6.7693, |
| "loss/crossentropy": 2.0148328594863414, |
| "loss/hidden": 3.09140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16505753956735134, |
| "step": 8610 |
| }, |
| { |
| "epoch": 0.862, |
| "grad_norm": 18.75, |
| "grad_norm_var": 7.334830729166667, |
| "learning_rate": 3.394315835827421e-05, |
| "loss": 6.7534, |
| "loss/crossentropy": 2.3073144420981406, |
| "loss/hidden": 2.981640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15456721372902393, |
| "step": 8620 |
| }, |
| { |
| "epoch": 0.863, |
| "grad_norm": 18.625, |
| "grad_norm_var": 6.7712890625, |
| "learning_rate": 3.363146245582325e-05, |
| "loss": 6.6867, |
| "loss/crossentropy": 2.1204227074980735, |
| "loss/hidden": 2.97578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15875139143317937, |
| "step": 8630 |
| }, |
| { |
| "epoch": 0.864, |
| "grad_norm": 27.75, |
| "grad_norm_var": 9.9619140625, |
| "learning_rate": 3.3321084665422807e-05, |
| "loss": 6.7054, |
| "loss/crossentropy": 2.1530178070068358, |
| "loss/hidden": 3.030078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15609838385134936, |
| "step": 8640 |
| }, |
| { |
| "epoch": 0.865, |
| "grad_norm": 19.5, |
| "grad_norm_var": 6.264583333333333, |
| "learning_rate": 3.301204413263704e-05, |
| "loss": 6.7304, |
| "loss/crossentropy": 2.134761989861727, |
| "loss/hidden": 2.95859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15059877443127334, |
| "step": 8650 |
| }, |
| { |
| "epoch": 0.866, |
| "grad_norm": 19.375, |
| "grad_norm_var": 61.580989583333334, |
| "learning_rate": 3.270435992054166e-05, |
| "loss": 6.7185, |
| "loss/crossentropy": 2.227277898788452, |
| "loss/hidden": 2.878515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15938994903117418, |
| "step": 8660 |
| }, |
| { |
| "epoch": 0.867, |
| "grad_norm": 19.5, |
| "grad_norm_var": 66.7259765625, |
| "learning_rate": 3.239805100854823e-05, |
| "loss": 6.7492, |
| "loss/crossentropy": 2.2605167113244535, |
| "loss/hidden": 2.892578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1458374190144241, |
| "step": 8670 |
| }, |
| { |
| "epoch": 0.868, |
| "grad_norm": 20.125, |
| "grad_norm_var": 11.395247395833334, |
| "learning_rate": 3.2093136291233296e-05, |
| "loss": 6.5492, |
| "loss/crossentropy": 1.981280042976141, |
| "loss/hidden": 2.947265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14381110519170762, |
| "step": 8680 |
| }, |
| { |
| "epoch": 0.869, |
| "grad_norm": 20.125, |
| "grad_norm_var": 7.0509765625, |
| "learning_rate": 3.178963457717287e-05, |
| "loss": 6.7433, |
| "loss/crossentropy": 2.047644779831171, |
| "loss/hidden": 3.013671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1535273300483823, |
| "step": 8690 |
| }, |
| { |
| "epoch": 0.87, |
| "grad_norm": 19.0, |
| "grad_norm_var": 4.912239583333333, |
| "learning_rate": 3.1487564587782306e-05, |
| "loss": 6.6896, |
| "loss/crossentropy": 2.097446948289871, |
| "loss/hidden": 2.985546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15309033393859864, |
| "step": 8700 |
| }, |
| { |
| "epoch": 0.871, |
| "grad_norm": 19.875, |
| "grad_norm_var": 4.342643229166667, |
| "learning_rate": 3.118694495616143e-05, |
| "loss": 6.6671, |
| "loss/crossentropy": 2.203226202726364, |
| "loss/hidden": 2.9078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1557199139147997, |
| "step": 8710 |
| }, |
| { |
| "epoch": 0.872, |
| "grad_norm": 19.25, |
| "grad_norm_var": 7.030208333333333, |
| "learning_rate": 3.088779422594514e-05, |
| "loss": 6.7012, |
| "loss/crossentropy": 2.27069154381752, |
| "loss/hidden": 3.011328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16882256120443345, |
| "step": 8720 |
| }, |
| { |
| "epoch": 0.873, |
| "grad_norm": 18.25, |
| "grad_norm_var": 6.295768229166667, |
| "learning_rate": 3.059013085015967e-05, |
| "loss": 6.7011, |
| "loss/crossentropy": 2.1291425973176956, |
| "loss/hidden": 3.0296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15904739797115325, |
| "step": 8730 |
| }, |
| { |
| "epoch": 0.874, |
| "grad_norm": 18.75, |
| "grad_norm_var": 7.843489583333334, |
| "learning_rate": 3.0293973190084068e-05, |
| "loss": 6.5634, |
| "loss/crossentropy": 2.0249536350369453, |
| "loss/hidden": 2.866796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14526209011673927, |
| "step": 8740 |
| }, |
| { |
| "epoch": 0.875, |
| "grad_norm": 19.25, |
| "grad_norm_var": 24.33125, |
| "learning_rate": 2.9999339514117912e-05, |
| "loss": 6.6656, |
| "loss/crossentropy": 2.2232491344213487, |
| "loss/hidden": 2.915625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15606734082102774, |
| "step": 8750 |
| }, |
| { |
| "epoch": 0.876, |
| "grad_norm": 17.5, |
| "grad_norm_var": 25.318684895833332, |
| "learning_rate": 2.9706247996654137e-05, |
| "loss": 6.6062, |
| "loss/crossentropy": 2.239446271210909, |
| "loss/hidden": 2.93203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1495282245334238, |
| "step": 8760 |
| }, |
| { |
| "epoch": 0.877, |
| "grad_norm": 20.0, |
| "grad_norm_var": 10.77265625, |
| "learning_rate": 2.9414716716958125e-05, |
| "loss": 6.7024, |
| "loss/crossentropy": 2.199841946363449, |
| "loss/hidden": 2.98984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16247558798640965, |
| "step": 8770 |
| }, |
| { |
| "epoch": 0.878, |
| "grad_norm": 19.125, |
| "grad_norm_var": 8.998958333333333, |
| "learning_rate": 2.9124763658052478e-05, |
| "loss": 6.782, |
| "loss/crossentropy": 2.1751702331006526, |
| "loss/hidden": 2.92890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1545461408793926, |
| "step": 8780 |
| }, |
| { |
| "epoch": 0.879, |
| "grad_norm": 19.75, |
| "grad_norm_var": 6.75390625, |
| "learning_rate": 2.8836406705607627e-05, |
| "loss": 6.6353, |
| "loss/crossentropy": 2.1896474927663805, |
| "loss/hidden": 3.0078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1576914018020034, |
| "step": 8790 |
| }, |
| { |
| "epoch": 0.88, |
| "grad_norm": 18.625, |
| "grad_norm_var": 6.298372395833334, |
| "learning_rate": 2.854966364683872e-05, |
| "loss": 6.6865, |
| "loss/crossentropy": 2.1086228668689726, |
| "loss/hidden": 2.943359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14896233137696982, |
| "step": 8800 |
| }, |
| { |
| "epoch": 0.881, |
| "grad_norm": 18.125, |
| "grad_norm_var": 8.4791015625, |
| "learning_rate": 2.826455216940827e-05, |
| "loss": 6.6194, |
| "loss/crossentropy": 1.9843620665371418, |
| "loss/hidden": 2.90234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.13960447236895562, |
| "step": 8810 |
| }, |
| { |
| "epoch": 0.882, |
| "grad_norm": 18.75, |
| "grad_norm_var": 8.501822916666667, |
| "learning_rate": 2.798108986033523e-05, |
| "loss": 6.6478, |
| "loss/crossentropy": 2.1602120816707613, |
| "loss/hidden": 2.89765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1467864315956831, |
| "step": 8820 |
| }, |
| { |
| "epoch": 0.883, |
| "grad_norm": 20.0, |
| "grad_norm_var": 14.489518229166666, |
| "learning_rate": 2.769929420491002e-05, |
| "loss": 6.5831, |
| "loss/crossentropy": 2.1451182171702383, |
| "loss/hidden": 3.0640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1538067078217864, |
| "step": 8830 |
| }, |
| { |
| "epoch": 0.884, |
| "grad_norm": 28.125, |
| "grad_norm_var": 11.8650390625, |
| "learning_rate": 2.741918258561607e-05, |
| "loss": 6.6807, |
| "loss/crossentropy": 2.2480786412954332, |
| "loss/hidden": 2.9171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15250301491469145, |
| "step": 8840 |
| }, |
| { |
| "epoch": 0.885, |
| "grad_norm": 20.125, |
| "grad_norm_var": 5.908333333333333, |
| "learning_rate": 2.7140772281057468e-05, |
| "loss": 6.7234, |
| "loss/crossentropy": 2.122820168733597, |
| "loss/hidden": 2.941796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15208065062761306, |
| "step": 8850 |
| }, |
| { |
| "epoch": 0.886, |
| "grad_norm": 20.5, |
| "grad_norm_var": 5.928580729166667, |
| "learning_rate": 2.6864080464893282e-05, |
| "loss": 6.729, |
| "loss/crossentropy": 2.239290738105774, |
| "loss/hidden": 2.94921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16029757130891084, |
| "step": 8860 |
| }, |
| { |
| "epoch": 0.887, |
| "grad_norm": 17.25, |
| "grad_norm_var": 7.351822916666666, |
| "learning_rate": 2.6589124204778006e-05, |
| "loss": 6.716, |
| "loss/crossentropy": 2.0530457444489003, |
| "loss/hidden": 3.00390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15680783092975617, |
| "step": 8870 |
| }, |
| { |
| "epoch": 0.888, |
| "grad_norm": 20.75, |
| "grad_norm_var": 1.4462890625, |
| "learning_rate": 2.6315920461308964e-05, |
| "loss": 6.6087, |
| "loss/crossentropy": 1.8908826246857644, |
| "loss/hidden": 2.987109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15084341559559106, |
| "step": 8880 |
| }, |
| { |
| "epoch": 0.889, |
| "grad_norm": 18.25, |
| "grad_norm_var": 0.871875, |
| "learning_rate": 2.6044486086979953e-05, |
| "loss": 6.7122, |
| "loss/crossentropy": 2.1536234870553015, |
| "loss/hidden": 3.026171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15791046461090447, |
| "step": 8890 |
| }, |
| { |
| "epoch": 0.89, |
| "grad_norm": 20.125, |
| "grad_norm_var": 0.9384765625, |
| "learning_rate": 2.577483782514174e-05, |
| "loss": 6.7238, |
| "loss/crossentropy": 2.2359716653823853, |
| "loss/hidden": 3.04921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1683175450190902, |
| "step": 8900 |
| }, |
| { |
| "epoch": 0.891, |
| "grad_norm": 20.0, |
| "grad_norm_var": 1.2202473958333333, |
| "learning_rate": 2.550699230896928e-05, |
| "loss": 6.6606, |
| "loss/crossentropy": 2.084880957007408, |
| "loss/hidden": 3.04296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1634566718712449, |
| "step": 8910 |
| }, |
| { |
| "epoch": 0.892, |
| "grad_norm": 19.25, |
| "grad_norm_var": 1.1238932291666666, |
| "learning_rate": 2.5240966060435677e-05, |
| "loss": 6.6584, |
| "loss/crossentropy": 2.0426858074963095, |
| "loss/hidden": 2.9921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14660524372011424, |
| "step": 8920 |
| }, |
| { |
| "epoch": 0.893, |
| "grad_norm": 17.5, |
| "grad_norm_var": 0.8619140625, |
| "learning_rate": 2.497677548929301e-05, |
| "loss": 6.6837, |
| "loss/crossentropy": 2.171066698431969, |
| "loss/hidden": 3.036328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16031729001551867, |
| "step": 8930 |
| }, |
| { |
| "epoch": 0.894, |
| "grad_norm": 19.25, |
| "grad_norm_var": 0.73125, |
| "learning_rate": 2.4714436892060213e-05, |
| "loss": 6.6919, |
| "loss/crossentropy": 2.1432586669921876, |
| "loss/hidden": 2.93359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14423664398491381, |
| "step": 8940 |
| }, |
| { |
| "epoch": 0.895, |
| "grad_norm": 23.5, |
| "grad_norm_var": 1.6395833333333334, |
| "learning_rate": 2.445396645101762e-05, |
| "loss": 6.6747, |
| "loss/crossentropy": 2.170257590711117, |
| "loss/hidden": 2.941015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15344964815303683, |
| "step": 8950 |
| }, |
| { |
| "epoch": 0.896, |
| "grad_norm": 18.75, |
| "grad_norm_var": 1.85390625, |
| "learning_rate": 2.4195380233209008e-05, |
| "loss": 6.6903, |
| "loss/crossentropy": 2.2802744664251806, |
| "loss/hidden": 3.00078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16574028157629073, |
| "step": 8960 |
| }, |
| { |
| "epoch": 0.897, |
| "grad_norm": 18.25, |
| "grad_norm_var": 1.1160807291666666, |
| "learning_rate": 2.393869418945033e-05, |
| "loss": 6.6492, |
| "loss/crossentropy": 2.2105372130870817, |
| "loss/hidden": 2.98671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15795816536992788, |
| "step": 8970 |
| }, |
| { |
| "epoch": 0.898, |
| "grad_norm": 20.0, |
| "grad_norm_var": 0.83515625, |
| "learning_rate": 2.3683924153345856e-05, |
| "loss": 6.6716, |
| "loss/crossentropy": 2.190083122253418, |
| "loss/hidden": 2.91484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14693394694477319, |
| "step": 8980 |
| }, |
| { |
| "epoch": 0.899, |
| "grad_norm": 18.5, |
| "grad_norm_var": 1.5457682291666666, |
| "learning_rate": 2.3431085840311496e-05, |
| "loss": 6.652, |
| "loss/crossentropy": 2.168794426321983, |
| "loss/hidden": 2.962109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15159044261090457, |
| "step": 8990 |
| }, |
| { |
| "epoch": 0.9, |
| "grad_norm": 19.0, |
| "grad_norm_var": 0.3184895833333333, |
| "learning_rate": 2.3180194846605367e-05, |
| "loss": 6.6702, |
| "loss/crossentropy": 2.0840190701186656, |
| "loss/hidden": 2.9046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14546455312520265, |
| "step": 9000 |
| }, |
| { |
| "epoch": 0.901, |
| "grad_norm": 17.125, |
| "grad_norm_var": 12.045768229166667, |
| "learning_rate": 2.2931266648365763e-05, |
| "loss": 6.551, |
| "loss/crossentropy": 2.249745097756386, |
| "loss/hidden": 2.95234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14781816666945816, |
| "step": 9010 |
| }, |
| { |
| "epoch": 0.902, |
| "grad_norm": 18.0, |
| "grad_norm_var": 5.9103515625, |
| "learning_rate": 2.268431660065651e-05, |
| "loss": 6.6396, |
| "loss/crossentropy": 2.1970891691744328, |
| "loss/hidden": 2.925390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14704765304923056, |
| "step": 9020 |
| }, |
| { |
| "epoch": 0.903, |
| "grad_norm": 19.25, |
| "grad_norm_var": 2.7354166666666666, |
| "learning_rate": 2.2439359936519788e-05, |
| "loss": 6.5875, |
| "loss/crossentropy": 2.03671035990119, |
| "loss/hidden": 3.018359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15092918761074542, |
| "step": 9030 |
| }, |
| { |
| "epoch": 0.904, |
| "grad_norm": 19.625, |
| "grad_norm_var": 1.2052083333333334, |
| "learning_rate": 2.219641176603649e-05, |
| "loss": 6.6755, |
| "loss/crossentropy": 2.090250726044178, |
| "loss/hidden": 2.977734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15508323018439113, |
| "step": 9040 |
| }, |
| { |
| "epoch": 0.905, |
| "grad_norm": 18.75, |
| "grad_norm_var": 1.5997395833333334, |
| "learning_rate": 2.195548707539416e-05, |
| "loss": 6.5437, |
| "loss/crossentropy": 2.1223932191729546, |
| "loss/hidden": 2.994140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1513491977006197, |
| "step": 9050 |
| }, |
| { |
| "epoch": 0.906, |
| "grad_norm": 17.875, |
| "grad_norm_var": 1.0489583333333334, |
| "learning_rate": 2.1716600725962562e-05, |
| "loss": 6.7282, |
| "loss/crossentropy": 2.2691319927573206, |
| "loss/hidden": 2.9421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15906442627310752, |
| "step": 9060 |
| }, |
| { |
| "epoch": 0.907, |
| "grad_norm": 17.375, |
| "grad_norm_var": 2.015625, |
| "learning_rate": 2.147976745337702e-05, |
| "loss": 6.744, |
| "loss/crossentropy": 2.1490016683936117, |
| "loss/hidden": 3.001953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16720277369022368, |
| "step": 9070 |
| }, |
| { |
| "epoch": 0.908, |
| "grad_norm": 18.875, |
| "grad_norm_var": 0.653125, |
| "learning_rate": 2.1245001866629322e-05, |
| "loss": 6.5938, |
| "loss/crossentropy": 2.0249759793281554, |
| "loss/hidden": 2.986328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15444286894053222, |
| "step": 9080 |
| }, |
| { |
| "epoch": 0.909, |
| "grad_norm": 18.75, |
| "grad_norm_var": 0.3666015625, |
| "learning_rate": 2.1012318447166743e-05, |
| "loss": 6.6076, |
| "loss/crossentropy": 2.043487349152565, |
| "loss/hidden": 3.016796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1601484829559922, |
| "step": 9090 |
| }, |
| { |
| "epoch": 0.91, |
| "grad_norm": 19.125, |
| "grad_norm_var": 0.36868489583333336, |
| "learning_rate": 2.0781731547998614e-05, |
| "loss": 6.6279, |
| "loss/crossentropy": 2.207579642534256, |
| "loss/hidden": 2.980078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15659495070576668, |
| "step": 9100 |
| }, |
| { |
| "epoch": 0.911, |
| "grad_norm": 19.0, |
| "grad_norm_var": 0.8202473958333333, |
| "learning_rate": 2.055325539281104e-05, |
| "loss": 6.7914, |
| "loss/crossentropy": 2.274830712378025, |
| "loss/hidden": 2.973828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1602309113368392, |
| "step": 9110 |
| }, |
| { |
| "epoch": 0.912, |
| "grad_norm": 19.5, |
| "grad_norm_var": 1.11640625, |
| "learning_rate": 2.0326904075089492e-05, |
| "loss": 6.7296, |
| "loss/crossentropy": 2.2438980847597123, |
| "loss/hidden": 3.09921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17445293702185155, |
| "step": 9120 |
| }, |
| { |
| "epoch": 0.913, |
| "grad_norm": 19.875, |
| "grad_norm_var": 0.7061848958333333, |
| "learning_rate": 2.0102691557249456e-05, |
| "loss": 6.6545, |
| "loss/crossentropy": 2.1133376926183702, |
| "loss/hidden": 2.894140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14513556826859714, |
| "step": 9130 |
| }, |
| { |
| "epoch": 0.914, |
| "grad_norm": 17.75, |
| "grad_norm_var": 1.8296223958333333, |
| "learning_rate": 1.9880631669775164e-05, |
| "loss": 6.6221, |
| "loss/crossentropy": 2.132697519659996, |
| "loss/hidden": 2.955859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16242318861186505, |
| "step": 9140 |
| }, |
| { |
| "epoch": 0.915, |
| "grad_norm": 17.625, |
| "grad_norm_var": 3.874739583333333, |
| "learning_rate": 1.966073811036649e-05, |
| "loss": 6.6691, |
| "loss/crossentropy": 2.164944548904896, |
| "loss/hidden": 2.848046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14622690286487341, |
| "step": 9150 |
| }, |
| { |
| "epoch": 0.916, |
| "grad_norm": 22.125, |
| "grad_norm_var": 2.6801432291666667, |
| "learning_rate": 1.944302444309393e-05, |
| "loss": 6.6237, |
| "loss/crossentropy": 2.149251754581928, |
| "loss/hidden": 3.013671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16157012842595578, |
| "step": 9160 |
| }, |
| { |
| "epoch": 0.917, |
| "grad_norm": 18.875, |
| "grad_norm_var": 1.50625, |
| "learning_rate": 1.922750409756205e-05, |
| "loss": 6.6684, |
| "loss/crossentropy": 2.1195749223232268, |
| "loss/hidden": 2.998828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15725019499659537, |
| "step": 9170 |
| }, |
| { |
| "epoch": 0.918, |
| "grad_norm": 19.125, |
| "grad_norm_var": 5.559375, |
| "learning_rate": 1.9014190368080926e-05, |
| "loss": 6.7637, |
| "loss/crossentropy": 2.1270042449235915, |
| "loss/hidden": 3.12578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17823693621903658, |
| "step": 9180 |
| }, |
| { |
| "epoch": 0.919, |
| "grad_norm": 19.25, |
| "grad_norm_var": 5.692122395833334, |
| "learning_rate": 1.88030964128462e-05, |
| "loss": 6.7194, |
| "loss/crossentropy": 2.163611389696598, |
| "loss/hidden": 2.858203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.13989691147580743, |
| "step": 9190 |
| }, |
| { |
| "epoch": 0.92, |
| "grad_norm": 18.875, |
| "grad_norm_var": 0.8552083333333333, |
| "learning_rate": 1.8594235253127375e-05, |
| "loss": 6.6762, |
| "loss/crossentropy": 2.1259265393018723, |
| "loss/hidden": 2.963671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15320588331669568, |
| "step": 9200 |
| }, |
| { |
| "epoch": 0.921, |
| "grad_norm": 19.75, |
| "grad_norm_var": 0.9309895833333334, |
| "learning_rate": 1.838761977246459e-05, |
| "loss": 6.6246, |
| "loss/crossentropy": 2.1278042390942575, |
| "loss/hidden": 2.97109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1498261951841414, |
| "step": 9210 |
| }, |
| { |
| "epoch": 0.922, |
| "grad_norm": 18.875, |
| "grad_norm_var": 4.901822916666666, |
| "learning_rate": 1.818326271587394e-05, |
| "loss": 6.7278, |
| "loss/crossentropy": 2.222877550125122, |
| "loss/hidden": 2.95859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1604774661362171, |
| "step": 9220 |
| }, |
| { |
| "epoch": 0.923, |
| "grad_norm": 19.5, |
| "grad_norm_var": 5.441666666666666, |
| "learning_rate": 1.7981176689061317e-05, |
| "loss": 6.5921, |
| "loss/crossentropy": 2.1431047171354294, |
| "loss/hidden": 2.998046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14911785759031773, |
| "step": 9230 |
| }, |
| { |
| "epoch": 0.924, |
| "grad_norm": 18.375, |
| "grad_norm_var": 1.5634765625, |
| "learning_rate": 1.7781374157644715e-05, |
| "loss": 6.4714, |
| "loss/crossentropy": 2.089540404081345, |
| "loss/hidden": 2.984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15576251531019808, |
| "step": 9240 |
| }, |
| { |
| "epoch": 0.925, |
| "grad_norm": 18.625, |
| "grad_norm_var": 0.5483723958333333, |
| "learning_rate": 1.758386744638546e-05, |
| "loss": 6.7243, |
| "loss/crossentropy": 2.2453290104866026, |
| "loss/hidden": 2.94609375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14587981197983027, |
| "step": 9250 |
| }, |
| { |
| "epoch": 0.926, |
| "grad_norm": 20.625, |
| "grad_norm_var": 3.0152303024252385e+18, |
| "learning_rate": 1.738866873842785e-05, |
| "loss": 6.7019, |
| "loss/crossentropy": 2.1938077017664908, |
| "loss/hidden": 2.9220703125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15224464647471905, |
| "step": 9260 |
| }, |
| { |
| "epoch": 0.927, |
| "grad_norm": 18.5, |
| "grad_norm_var": 3.015230302642294e+18, |
| "learning_rate": 1.7195790074547615e-05, |
| "loss": 6.6027, |
| "loss/crossentropy": 2.1094757497310637, |
| "loss/hidden": 2.9421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1458785830065608, |
| "step": 9270 |
| }, |
| { |
| "epoch": 0.928, |
| "grad_norm": 18.375, |
| "grad_norm_var": 0.6343098958333333, |
| "learning_rate": 1.7005243352409334e-05, |
| "loss": 6.5892, |
| "loss/crossentropy": 2.1868397384881972, |
| "loss/hidden": 2.9390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15736450720578432, |
| "step": 9280 |
| }, |
| { |
| "epoch": 0.929, |
| "grad_norm": 19.875, |
| "grad_norm_var": 0.6218098958333333, |
| "learning_rate": 1.681704032583234e-05, |
| "loss": 6.6312, |
| "loss/crossentropy": 2.102400007843971, |
| "loss/hidden": 2.959765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14601742010563612, |
| "step": 9290 |
| }, |
| { |
| "epoch": 0.93, |
| "grad_norm": 18.0, |
| "grad_norm_var": 0.90390625, |
| "learning_rate": 1.6631192604065855e-05, |
| "loss": 6.6723, |
| "loss/crossentropy": 2.1556510917842386, |
| "loss/hidden": 2.98984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15542004024609923, |
| "step": 9300 |
| }, |
| { |
| "epoch": 0.931, |
| "grad_norm": 18.625, |
| "grad_norm_var": 0.77265625, |
| "learning_rate": 1.644771165107277e-05, |
| "loss": 6.6506, |
| "loss/crossentropy": 2.025302214920521, |
| "loss/hidden": 2.92265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15237770597450434, |
| "step": 9310 |
| }, |
| { |
| "epoch": 0.932, |
| "grad_norm": 19.5, |
| "grad_norm_var": 0.5552083333333333, |
| "learning_rate": 1.6266608784822544e-05, |
| "loss": 6.6707, |
| "loss/crossentropy": 2.2078075274825095, |
| "loss/hidden": 2.873046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15092720007523894, |
| "step": 9320 |
| }, |
| { |
| "epoch": 0.933, |
| "grad_norm": 18.75, |
| "grad_norm_var": 6.516666666666667, |
| "learning_rate": 1.6087895176593026e-05, |
| "loss": 6.6366, |
| "loss/crossentropy": 2.1986721485853193, |
| "loss/hidden": 2.983984375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16585835479199887, |
| "step": 9330 |
| }, |
| { |
| "epoch": 0.934, |
| "grad_norm": 19.25, |
| "grad_norm_var": 7.455143229166667, |
| "learning_rate": 1.5911581850281403e-05, |
| "loss": 6.6058, |
| "loss/crossentropy": 2.2786722093820573, |
| "loss/hidden": 2.87734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14315823893994092, |
| "step": 9340 |
| }, |
| { |
| "epoch": 0.935, |
| "grad_norm": 19.0, |
| "grad_norm_var": 0.9405598958333333, |
| "learning_rate": 1.573767968172413e-05, |
| "loss": 6.6968, |
| "loss/crossentropy": 2.0444235846400263, |
| "loss/hidden": 2.94140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15067112175747752, |
| "step": 9350 |
| }, |
| { |
| "epoch": 0.936, |
| "grad_norm": 20.125, |
| "grad_norm_var": 0.41848958333333336, |
| "learning_rate": 1.556619939802615e-05, |
| "loss": 6.6068, |
| "loss/crossentropy": 2.088939316570759, |
| "loss/hidden": 2.976171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14950306443497538, |
| "step": 9360 |
| }, |
| { |
| "epoch": 0.937, |
| "grad_norm": 19.875, |
| "grad_norm_var": 0.47890625, |
| "learning_rate": 1.5397151576899065e-05, |
| "loss": 6.6725, |
| "loss/crossentropy": 2.221402445435524, |
| "loss/hidden": 3.01015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1524247596040368, |
| "step": 9370 |
| }, |
| { |
| "epoch": 0.938, |
| "grad_norm": 19.0, |
| "grad_norm_var": 0.8010416666666667, |
| "learning_rate": 1.5230546646008795e-05, |
| "loss": 6.5985, |
| "loss/crossentropy": 2.2344843059778214, |
| "loss/hidden": 2.925, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15453678853809832, |
| "step": 9380 |
| }, |
| { |
| "epoch": 0.939, |
| "grad_norm": 18.25, |
| "grad_norm_var": 0.8322916666666667, |
| "learning_rate": 1.506639488233226e-05, |
| "loss": 6.6656, |
| "loss/crossentropy": 2.1397018134593964, |
| "loss/hidden": 2.937890625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15283725168555976, |
| "step": 9390 |
| }, |
| { |
| "epoch": 0.94, |
| "grad_norm": 20.25, |
| "grad_norm_var": 2.08515625, |
| "learning_rate": 1.490470641152345e-05, |
| "loss": 6.6495, |
| "loss/crossentropy": 2.180284807085991, |
| "loss/hidden": 3.04765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1779416125267744, |
| "step": 9400 |
| }, |
| { |
| "epoch": 0.941, |
| "grad_norm": 18.375, |
| "grad_norm_var": 1.56640625, |
| "learning_rate": 1.4745491207288874e-05, |
| "loss": 6.7046, |
| "loss/crossentropy": 1.9674726322293281, |
| "loss/hidden": 2.985546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14737507775425912, |
| "step": 9410 |
| }, |
| { |
| "epoch": 0.942, |
| "grad_norm": 19.125, |
| "grad_norm_var": 0.9546223958333333, |
| "learning_rate": 1.4588759090772302e-05, |
| "loss": 6.645, |
| "loss/crossentropy": 1.932501320540905, |
| "loss/hidden": 2.90859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.13745289896614848, |
| "step": 9420 |
| }, |
| { |
| "epoch": 0.943, |
| "grad_norm": 19.0, |
| "grad_norm_var": 0.4114583333333333, |
| "learning_rate": 1.4434519729948942e-05, |
| "loss": 6.7417, |
| "loss/crossentropy": 2.132400029152632, |
| "loss/hidden": 2.90859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1424996647052467, |
| "step": 9430 |
| }, |
| { |
| "epoch": 0.944, |
| "grad_norm": 18.625, |
| "grad_norm_var": 0.9184895833333333, |
| "learning_rate": 1.428278263902913e-05, |
| "loss": 6.6339, |
| "loss/crossentropy": 2.091708117723465, |
| "loss/hidden": 2.963671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14837299454957248, |
| "step": 9440 |
| }, |
| { |
| "epoch": 0.945, |
| "grad_norm": 18.25, |
| "grad_norm_var": 0.4910807291666667, |
| "learning_rate": 1.413355717787134e-05, |
| "loss": 6.7095, |
| "loss/crossentropy": 2.028689184784889, |
| "loss/hidden": 2.925390625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.13867047037929298, |
| "step": 9450 |
| }, |
| { |
| "epoch": 0.946, |
| "grad_norm": 19.125, |
| "grad_norm_var": 0.3458333333333333, |
| "learning_rate": 1.3986852551404964e-05, |
| "loss": 6.7444, |
| "loss/crossentropy": 2.2080354422330855, |
| "loss/hidden": 2.956640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1599160810932517, |
| "step": 9460 |
| }, |
| { |
| "epoch": 0.947, |
| "grad_norm": 19.375, |
| "grad_norm_var": 0.2559895833333333, |
| "learning_rate": 1.3842677809062387e-05, |
| "loss": 6.6449, |
| "loss/crossentropy": 2.1781085431575775, |
| "loss/hidden": 2.95625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16033231243491172, |
| "step": 9470 |
| }, |
| { |
| "epoch": 0.948, |
| "grad_norm": 18.375, |
| "grad_norm_var": 0.34765625, |
| "learning_rate": 1.3701041844220849e-05, |
| "loss": 6.6578, |
| "loss/crossentropy": 2.149868738651276, |
| "loss/hidden": 2.9625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1523012975230813, |
| "step": 9480 |
| }, |
| { |
| "epoch": 0.949, |
| "grad_norm": 19.875, |
| "grad_norm_var": 0.665625, |
| "learning_rate": 1.3561953393653824e-05, |
| "loss": 6.5907, |
| "loss/crossentropy": 2.013017991185188, |
| "loss/hidden": 3.104296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16240219958126545, |
| "step": 9490 |
| }, |
| { |
| "epoch": 0.95, |
| "grad_norm": 18.375, |
| "grad_norm_var": 0.7427083333333333, |
| "learning_rate": 1.3425421036992098e-05, |
| "loss": 6.5356, |
| "loss/crossentropy": 2.1714504957199097, |
| "loss/hidden": 2.993359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14401763658970596, |
| "step": 9500 |
| }, |
| { |
| "epoch": 0.951, |
| "grad_norm": 18.75, |
| "grad_norm_var": 0.8369140625, |
| "learning_rate": 1.3291453196194565e-05, |
| "loss": 6.6629, |
| "loss/crossentropy": 2.0724572435021402, |
| "loss/hidden": 3.02578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1599965788424015, |
| "step": 9510 |
| }, |
| { |
| "epoch": 0.952, |
| "grad_norm": 18.875, |
| "grad_norm_var": 1.1143229166666666, |
| "learning_rate": 1.3160058135028691e-05, |
| "loss": 6.6944, |
| "loss/crossentropy": 2.1555590838193894, |
| "loss/hidden": 2.9203125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1390397796407342, |
| "step": 9520 |
| }, |
| { |
| "epoch": 0.953, |
| "grad_norm": 19.25, |
| "grad_norm_var": 1.3806640625, |
| "learning_rate": 1.3031243958560772e-05, |
| "loss": 6.5407, |
| "loss/crossentropy": 2.054419046640396, |
| "loss/hidden": 2.96171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15348356384783984, |
| "step": 9530 |
| }, |
| { |
| "epoch": 0.954, |
| "grad_norm": 21.125, |
| "grad_norm_var": 0.6129557291666666, |
| "learning_rate": 1.2905018612655975e-05, |
| "loss": 6.716, |
| "loss/crossentropy": 2.2037750400602816, |
| "loss/hidden": 2.976171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15483110053464771, |
| "step": 9540 |
| }, |
| { |
| "epoch": 0.955, |
| "grad_norm": 18.75, |
| "grad_norm_var": 16.742122395833334, |
| "learning_rate": 1.2781389883488218e-05, |
| "loss": 6.6118, |
| "loss/crossentropy": 2.2344287753105165, |
| "loss/hidden": 2.926953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1532795075327158, |
| "step": 9550 |
| }, |
| { |
| "epoch": 0.956, |
| "grad_norm": 18.5, |
| "grad_norm_var": 16.912239583333335, |
| "learning_rate": 1.2660365397059856e-05, |
| "loss": 6.5768, |
| "loss/crossentropy": 2.1113690607249738, |
| "loss/hidden": 2.871484375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15772182103246452, |
| "step": 9560 |
| }, |
| { |
| "epoch": 0.957, |
| "grad_norm": 18.5, |
| "grad_norm_var": 0.4327473958333333, |
| "learning_rate": 1.2541952618731295e-05, |
| "loss": 6.6605, |
| "loss/crossentropy": 2.11531500518322, |
| "loss/hidden": 3.0078125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16428461223840712, |
| "step": 9570 |
| }, |
| { |
| "epoch": 0.958, |
| "grad_norm": 19.125, |
| "grad_norm_var": 1.0853515625, |
| "learning_rate": 1.2426158852760462e-05, |
| "loss": 6.632, |
| "loss/crossentropy": 2.180248150229454, |
| "loss/hidden": 3.052734375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1568142266944051, |
| "step": 9580 |
| }, |
| { |
| "epoch": 0.959, |
| "grad_norm": 19.625, |
| "grad_norm_var": 0.33932291666666664, |
| "learning_rate": 1.2312991241852293e-05, |
| "loss": 6.7198, |
| "loss/crossentropy": 2.157143747806549, |
| "loss/hidden": 2.940234375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15367843862622976, |
| "step": 9590 |
| }, |
| { |
| "epoch": 0.96, |
| "grad_norm": 18.25, |
| "grad_norm_var": 0.4488932291666667, |
| "learning_rate": 1.2202456766718093e-05, |
| "loss": 6.7218, |
| "loss/crossentropy": 2.226324610412121, |
| "loss/hidden": 2.95546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1611587280407548, |
| "step": 9600 |
| }, |
| { |
| "epoch": 0.961, |
| "grad_norm": 19.125, |
| "grad_norm_var": 0.39889322916666664, |
| "learning_rate": 1.2094562245644947e-05, |
| "loss": 6.5949, |
| "loss/crossentropy": 2.206631433963776, |
| "loss/hidden": 2.99921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16857987977564334, |
| "step": 9610 |
| }, |
| { |
| "epoch": 0.962, |
| "grad_norm": 18.75, |
| "grad_norm_var": 0.7893229166666667, |
| "learning_rate": 1.1989314334075145e-05, |
| "loss": 6.6565, |
| "loss/crossentropy": 2.078006048500538, |
| "loss/hidden": 2.885546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.13547398131340743, |
| "step": 9620 |
| }, |
| { |
| "epoch": 0.963, |
| "grad_norm": 18.375, |
| "grad_norm_var": 1.2364583333333334, |
| "learning_rate": 1.1886719524195641e-05, |
| "loss": 6.6847, |
| "loss/crossentropy": 2.1102965086698533, |
| "loss/hidden": 3.138671875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17691021095961332, |
| "step": 9630 |
| }, |
| { |
| "epoch": 0.964, |
| "grad_norm": 18.75, |
| "grad_norm_var": 1.4379557291666667, |
| "learning_rate": 1.1786784144537563e-05, |
| "loss": 6.6631, |
| "loss/crossentropy": 2.0664229825139047, |
| "loss/hidden": 2.99921875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1560237266123295, |
| "step": 9640 |
| }, |
| { |
| "epoch": 0.965, |
| "grad_norm": 19.125, |
| "grad_norm_var": 0.30416666666666664, |
| "learning_rate": 1.168951435958588e-05, |
| "loss": 6.5308, |
| "loss/crossentropy": 2.1858409374952315, |
| "loss/hidden": 2.98828125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15965924076735974, |
| "step": 9650 |
| }, |
| { |
| "epoch": 0.966, |
| "grad_norm": 22.375, |
| "grad_norm_var": 1.6184895833333333, |
| "learning_rate": 1.1594916169399088e-05, |
| "loss": 6.6978, |
| "loss/crossentropy": 2.357753816246986, |
| "loss/hidden": 2.981640625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16278257928788661, |
| "step": 9660 |
| }, |
| { |
| "epoch": 0.967, |
| "grad_norm": 18.25, |
| "grad_norm_var": 1.6067057291666667, |
| "learning_rate": 1.1502995409239174e-05, |
| "loss": 6.6365, |
| "loss/crossentropy": 2.1270371586084367, |
| "loss/hidden": 2.872265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1369372447952628, |
| "step": 9670 |
| }, |
| { |
| "epoch": 0.968, |
| "grad_norm": 18.125, |
| "grad_norm_var": 1.0181640625, |
| "learning_rate": 1.1413757749211602e-05, |
| "loss": 6.5318, |
| "loss/crossentropy": 2.0077289439737798, |
| "loss/hidden": 2.90859375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1443242894485593, |
| "step": 9680 |
| }, |
| { |
| "epoch": 0.969, |
| "grad_norm": 19.125, |
| "grad_norm_var": 1.6197916666666667, |
| "learning_rate": 1.132720869391559e-05, |
| "loss": 6.6815, |
| "loss/crossentropy": 2.147722618281841, |
| "loss/hidden": 2.896875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1429056410677731, |
| "step": 9690 |
| }, |
| { |
| "epoch": 0.97, |
| "grad_norm": 19.25, |
| "grad_norm_var": 1.3723307291666667, |
| "learning_rate": 1.1243353582104556e-05, |
| "loss": 6.7836, |
| "loss/crossentropy": 2.157535497099161, |
| "loss/hidden": 3.10546875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16926438817754388, |
| "step": 9700 |
| }, |
| { |
| "epoch": 0.971, |
| "grad_norm": 19.625, |
| "grad_norm_var": 1.1962890625, |
| "learning_rate": 1.116219758635678e-05, |
| "loss": 6.5981, |
| "loss/crossentropy": 2.0965492375195027, |
| "loss/hidden": 2.94140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15332625713199377, |
| "step": 9710 |
| }, |
| { |
| "epoch": 0.972, |
| "grad_norm": 18.625, |
| "grad_norm_var": 1.2228515625, |
| "learning_rate": 1.1083745712756367e-05, |
| "loss": 6.741, |
| "loss/crossentropy": 2.0914740189909935, |
| "loss/hidden": 3.059375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15363588072359563, |
| "step": 9720 |
| }, |
| { |
| "epoch": 0.973, |
| "grad_norm": 21.625, |
| "grad_norm_var": 1.3393229166666667, |
| "learning_rate": 1.1008002800584424e-05, |
| "loss": 6.701, |
| "loss/crossentropy": 2.141095507889986, |
| "loss/hidden": 2.982421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15398450717329978, |
| "step": 9730 |
| }, |
| { |
| "epoch": 0.974, |
| "grad_norm": 19.25, |
| "grad_norm_var": 1.6205729166666667, |
| "learning_rate": 1.0934973522020538e-05, |
| "loss": 6.6305, |
| "loss/crossentropy": 2.1611881658434866, |
| "loss/hidden": 2.96328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14838593397289515, |
| "step": 9740 |
| }, |
| { |
| "epoch": 0.975, |
| "grad_norm": 19.375, |
| "grad_norm_var": 2.2900390625, |
| "learning_rate": 1.0864662381854632e-05, |
| "loss": 6.6726, |
| "loss/crossentropy": 2.0327648639678957, |
| "loss/hidden": 2.972265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16227712836116553, |
| "step": 9750 |
| }, |
| { |
| "epoch": 0.976, |
| "grad_norm": 20.5, |
| "grad_norm_var": 10.545572916666666, |
| "learning_rate": 1.0797073717209014e-05, |
| "loss": 6.6785, |
| "loss/crossentropy": 2.14007937759161, |
| "loss/hidden": 2.93515625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15520989168435334, |
| "step": 9760 |
| }, |
| { |
| "epoch": 0.977, |
| "grad_norm": 19.5, |
| "grad_norm_var": 0.8822265625, |
| "learning_rate": 1.0732211697270884e-05, |
| "loss": 6.6563, |
| "loss/crossentropy": 2.121374186873436, |
| "loss/hidden": 2.992578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16025999132543803, |
| "step": 9770 |
| }, |
| { |
| "epoch": 0.978, |
| "grad_norm": 18.75, |
| "grad_norm_var": 0.56640625, |
| "learning_rate": 1.0670080323035176e-05, |
| "loss": 6.6324, |
| "loss/crossentropy": 2.1174088306725025, |
| "loss/hidden": 2.96328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16041831383481622, |
| "step": 9780 |
| }, |
| { |
| "epoch": 0.979, |
| "grad_norm": 18.75, |
| "grad_norm_var": 0.9497395833333333, |
| "learning_rate": 1.0610683427057707e-05, |
| "loss": 6.724, |
| "loss/crossentropy": 2.211408218741417, |
| "loss/hidden": 2.91796875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14411625489592553, |
| "step": 9790 |
| }, |
| { |
| "epoch": 0.98, |
| "grad_norm": 19.25, |
| "grad_norm_var": 1.04765625, |
| "learning_rate": 1.0554024673218807e-05, |
| "loss": 6.7129, |
| "loss/crossentropy": 2.1691969078034163, |
| "loss/hidden": 2.94765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14351302795112134, |
| "step": 9800 |
| }, |
| { |
| "epoch": 0.981, |
| "grad_norm": 20.875, |
| "grad_norm_var": 2.7122395833333335, |
| "learning_rate": 1.0500107556497298e-05, |
| "loss": 6.6663, |
| "loss/crossentropy": 2.1762838467955588, |
| "loss/hidden": 2.947265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1569036815315485, |
| "step": 9810 |
| }, |
| { |
| "epoch": 0.982, |
| "grad_norm": 17.75, |
| "grad_norm_var": 2.512955729166667, |
| "learning_rate": 1.0448935402754912e-05, |
| "loss": 6.6726, |
| "loss/crossentropy": 2.022391739487648, |
| "loss/hidden": 3.057421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16687135621905327, |
| "step": 9820 |
| }, |
| { |
| "epoch": 0.983, |
| "grad_norm": 20.0, |
| "grad_norm_var": 0.9244140625, |
| "learning_rate": 1.0400511368531138e-05, |
| "loss": 6.6903, |
| "loss/crossentropy": 2.0364112444221973, |
| "loss/hidden": 2.961328125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16487439339980484, |
| "step": 9830 |
| }, |
| { |
| "epoch": 0.984, |
| "grad_norm": 19.75, |
| "grad_norm_var": 1.1723307291666667, |
| "learning_rate": 1.0354838440848503e-05, |
| "loss": 6.6904, |
| "loss/crossentropy": 2.009164361655712, |
| "loss/hidden": 2.976171875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15027653202414512, |
| "step": 9840 |
| }, |
| { |
| "epoch": 0.985, |
| "grad_norm": 19.625, |
| "grad_norm_var": 1.6759765625, |
| "learning_rate": 1.0311919437028318e-05, |
| "loss": 6.64, |
| "loss/crossentropy": 2.213483475893736, |
| "loss/hidden": 2.978125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1542889505624771, |
| "step": 9850 |
| }, |
| { |
| "epoch": 0.986, |
| "grad_norm": 18.125, |
| "grad_norm_var": 0.7705729166666667, |
| "learning_rate": 1.0271757004516918e-05, |
| "loss": 6.6129, |
| "loss/crossentropy": 2.0778297156095507, |
| "loss/hidden": 3.02421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.16476696096360682, |
| "step": 9860 |
| }, |
| { |
| "epoch": 0.987, |
| "grad_norm": 18.75, |
| "grad_norm_var": 8.672916666666667, |
| "learning_rate": 1.023435362072231e-05, |
| "loss": 6.6758, |
| "loss/crossentropy": 2.1646489948034286, |
| "loss/hidden": 2.969140625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15129716228693724, |
| "step": 9870 |
| }, |
| { |
| "epoch": 0.988, |
| "grad_norm": 20.75, |
| "grad_norm_var": 0.7624348958333333, |
| "learning_rate": 1.01997115928614e-05, |
| "loss": 6.728, |
| "loss/crossentropy": 2.2021071404218673, |
| "loss/hidden": 2.879296875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14578848090022803, |
| "step": 9880 |
| }, |
| { |
| "epoch": 0.989, |
| "grad_norm": 17.625, |
| "grad_norm_var": 8.957291666666666, |
| "learning_rate": 1.0167833057817644e-05, |
| "loss": 6.6842, |
| "loss/crossentropy": 2.0844334810972214, |
| "loss/hidden": 3.037109375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.17800515107810497, |
| "step": 9890 |
| }, |
| { |
| "epoch": 0.99, |
| "grad_norm": 18.75, |
| "grad_norm_var": 9.4447265625, |
| "learning_rate": 1.0138719982009242e-05, |
| "loss": 6.5745, |
| "loss/crossentropy": 2.064706768095493, |
| "loss/hidden": 3.04765625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15266596768051385, |
| "step": 9900 |
| }, |
| { |
| "epoch": 0.991, |
| "grad_norm": 19.875, |
| "grad_norm_var": 1.0916666666666666, |
| "learning_rate": 1.0112374161267848e-05, |
| "loss": 6.7953, |
| "loss/crossentropy": 2.100151504948735, |
| "loss/hidden": 3.007421875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15070506385527552, |
| "step": 9910 |
| }, |
| { |
| "epoch": 0.992, |
| "grad_norm": 18.875, |
| "grad_norm_var": 0.6893229166666667, |
| "learning_rate": 1.008879722072778e-05, |
| "loss": 6.7148, |
| "loss/crossentropy": 2.1347645312547683, |
| "loss/hidden": 2.993359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1627992235124111, |
| "step": 9920 |
| }, |
| { |
| "epoch": 0.993, |
| "grad_norm": 19.375, |
| "grad_norm_var": 0.7395833333333334, |
| "learning_rate": 1.0067990614725795e-05, |
| "loss": 6.6596, |
| "loss/crossentropy": 2.020579759031534, |
| "loss/hidden": 2.92578125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14832647573202848, |
| "step": 9930 |
| }, |
| { |
| "epoch": 0.994, |
| "grad_norm": 20.5, |
| "grad_norm_var": 0.6395833333333333, |
| "learning_rate": 1.0049955626711355e-05, |
| "loss": 6.6734, |
| "loss/crossentropy": 2.1040701150894163, |
| "loss/hidden": 3.025, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15983649622648954, |
| "step": 9940 |
| }, |
| { |
| "epoch": 0.995, |
| "grad_norm": 18.75, |
| "grad_norm_var": 0.5171223958333333, |
| "learning_rate": 1.003469336916747e-05, |
| "loss": 6.764, |
| "loss/crossentropy": 2.0992766320705414, |
| "loss/hidden": 2.973046875, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15146102383732796, |
| "step": 9950 |
| }, |
| { |
| "epoch": 0.996, |
| "grad_norm": 19.0, |
| "grad_norm_var": 0.6434895833333333, |
| "learning_rate": 1.002220478354208e-05, |
| "loss": 6.7187, |
| "loss/crossentropy": 2.085821607708931, |
| "loss/hidden": 3.072265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.15508200749754905, |
| "step": 9960 |
| }, |
| { |
| "epoch": 0.997, |
| "grad_norm": 20.625, |
| "grad_norm_var": 1.0739583333333333, |
| "learning_rate": 1.0012490640189975e-05, |
| "loss": 6.6748, |
| "loss/crossentropy": 2.0770878665149213, |
| "loss/hidden": 2.997265625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14898568596690892, |
| "step": 9970 |
| }, |
| { |
| "epoch": 0.998, |
| "grad_norm": 21.5, |
| "grad_norm_var": 1.76640625, |
| "learning_rate": 1.0005551538325275e-05, |
| "loss": 6.6931, |
| "loss/crossentropy": 2.3164404213428496, |
| "loss/hidden": 2.86015625, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1461044853553176, |
| "step": 9980 |
| }, |
| { |
| "epoch": 0.999, |
| "grad_norm": 21.5, |
| "grad_norm_var": 1.7613932291666667, |
| "learning_rate": 1.0001387905984467e-05, |
| "loss": 6.7299, |
| "loss/crossentropy": 2.1300622135400773, |
| "loss/hidden": 2.901953125, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.1424020084552467, |
| "step": 9990 |
| }, |
| { |
| "epoch": 1.0, |
| "grad_norm": 18.875, |
| "grad_norm_var": 0.70625, |
| "learning_rate": 1e-05, |
| "loss": 6.6101, |
| "loss/crossentropy": 2.187693312764168, |
| "loss/hidden": 2.918359375, |
| "loss/jsd": 0.0, |
| "loss/logits": 0.14794533271342517, |
| "step": 10000 |
| } |
| ], |
| "logging_steps": 10, |
| "max_steps": 10000, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 9223372036854775807, |
| "save_steps": 2000, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 2.8575100320088064e+19, |
| "train_batch_size": 2, |
| "trial_name": null, |
| "trial_params": null |
| } |
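
The block above is the tail of a Hugging Face `Trainer` state file (`trainer_state.json`): the final `log_history` records through step 10000, followed by the run-level metadata. Since it is plain JSON, the per-step records can be pulled out with the standard library alone. A minimal sketch, assuming the JSON has been saved as `trainer_state.json` (the file name, the output path, and the choice of plotted keys are illustrative assumptions, not part of the log itself):

```python
# Load the trainer state and plot the total loss together with the
# component losses logged alongside it at each step.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:  # assumed file name
    state = json.load(f)

history = state["log_history"]

for key in ("loss", "loss/crossentropy", "loss/logits", "loss/hidden"):
    # Guard with `key in rec` so any record missing a field is skipped.
    steps = [rec["step"] for rec in history if key in rec]
    values = [rec[key] for rec in history if key in rec]
    plt.plot(steps, values, label=key)

plt.xlabel("step")
plt.ylabel("loss")
plt.yscale("log")  # early losses are orders of magnitude larger than late ones
plt.legend()
plt.savefig("loss_curves.png", dpi=150)  # assumed output path
```

The same pattern works for `grad_norm` and `grad_norm_var` (logged from the second record onward), which makes the intermittent variance spikes in the later entries easy to spot, for example the jumps past 16 at steps 9550-9560 against a baseline that mostly stays below 2.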
|