{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9999240179317681,
  "eval_steps": 160,
  "global_step": 3290,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0003039282729275891,
      "grad_norm": 5.229409217834473,
      "learning_rate": 0.0,
      "loss": 1.2164,
      "mean_token_accuracy": 0.7386507540941238,
      "num_tokens": 87378.0,
      "step": 1
    },
    {
      "epoch": 0.007598206823189727,
      "grad_norm": 4.077776908874512,
      "learning_rate": 1.2121212121212122e-06,
      "loss": 1.3951,
      "mean_token_accuracy": 0.7043480854481459,
      "num_tokens": 2555177.0,
      "step": 25
    },
    {
      "epoch": 0.015196413646379454,
      "grad_norm": 3.2261855602264404,
      "learning_rate": 2.474747474747475e-06,
      "loss": 1.3716,
      "mean_token_accuracy": 0.7009463596343994,
      "num_tokens": 5171296.0,
      "step": 50
    },
    {
      "epoch": 0.02279462046956918,
      "grad_norm": 1.221886396408081,
      "learning_rate": 3.737373737373738e-06,
      "loss": 1.2697,
      "mean_token_accuracy": 0.7165025919675827,
      "num_tokens": 7791520.0,
      "step": 75
    },
    {
      "epoch": 0.030392827292758908,
      "grad_norm": 1.2025024890899658,
      "learning_rate": 5e-06,
      "loss": 1.2611,
      "mean_token_accuracy": 0.7154420650005341,
      "num_tokens": 10428214.0,
      "step": 100
    },
    {
      "epoch": 0.03799103411594864,
      "grad_norm": 1.2315771579742432,
      "learning_rate": 4.960827326856785e-06,
      "loss": 1.2618,
      "mean_token_accuracy": 0.7133645015954971,
      "num_tokens": 13035315.0,
      "step": 125
    },
    {
      "epoch": 0.04558924093913836,
      "grad_norm": 1.1814574003219604,
      "learning_rate": 4.92165465371357e-06,
      "loss": 1.2494,
      "mean_token_accuracy": 0.7152758586406708,
      "num_tokens": 15555229.0,
      "step": 150
    },
    {
      "epoch": 0.05318744776232809,
      "grad_norm": 1.0967789888381958,
      "learning_rate": 4.882481980570354e-06,
      "loss": 1.1929,
      "mean_token_accuracy": 0.7268460893630981,
      "num_tokens": 18110892.0,
      "step": 175
    },
    {
      "epoch": 0.060785654585517816,
      "grad_norm": 1.38316011428833,
      "learning_rate": 4.843309307427139e-06,
      "loss": 1.2274,
      "mean_token_accuracy": 0.7177506709098815,
      "num_tokens": 20638485.0,
      "step": 200
    },
    {
      "epoch": 0.06838386140870754,
      "grad_norm": 1.1938471794128418,
      "learning_rate": 4.804136634283924e-06,
      "loss": 1.2036,
      "mean_token_accuracy": 0.7217154312133789,
      "num_tokens": 23186502.0,
      "step": 225
    },
    {
      "epoch": 0.07598206823189728,
      "grad_norm": 1.2482621669769287,
      "learning_rate": 4.764963961140708e-06,
      "loss": 1.1962,
      "mean_token_accuracy": 0.7237263369560242,
      "num_tokens": 25737398.0,
      "step": 250
    },
    {
      "epoch": 0.083580275055087,
      "grad_norm": 0.9959760308265686,
      "learning_rate": 4.725791287997493e-06,
      "loss": 1.1908,
      "mean_token_accuracy": 0.7237645584344864,
      "num_tokens": 28386635.0,
      "step": 275
    },
    {
      "epoch": 0.09117848187827672,
      "grad_norm": 1.1848807334899902,
      "learning_rate": 4.686618614854278e-06,
      "loss": 1.1968,
      "mean_token_accuracy": 0.7229974180459976,
      "num_tokens": 30963803.0,
      "step": 300
    },
    {
      "epoch": 0.09877668870146646,
      "grad_norm": 1.3056175708770752,
      "learning_rate": 4.647445941711063e-06,
      "loss": 1.1937,
      "mean_token_accuracy": 0.7229531377553939,
      "num_tokens": 33594219.0,
      "step": 325
    },
    {
      "epoch": 0.10637489552465618,
      "grad_norm": 1.135485053062439,
      "learning_rate": 4.608273268567847e-06,
      "loss": 1.1815,
      "mean_token_accuracy": 0.724581116437912,
      "num_tokens": 36131516.0,
      "step": 350
    },
    {
      "epoch": 0.11397310234784591,
      "grad_norm": 0.9885596036911011,
      "learning_rate": 4.569100595424632e-06,
      "loss": 1.1588,
      "mean_token_accuracy": 0.7286039453744888,
      "num_tokens": 38679243.0,
      "step": 375
    },
    {
      "epoch": 0.12157130917103563,
      "grad_norm": 0.9995855093002319,
      "learning_rate": 4.529927922281417e-06,
      "loss": 1.1754,
      "mean_token_accuracy": 0.7271614295244216,
      "num_tokens": 41253914.0,
      "step": 400
    },
    {
      "epoch": 0.12916951599422535,
      "grad_norm": 0.9095941185951233,
      "learning_rate": 4.490755249138202e-06,
      "loss": 1.2398,
      "mean_token_accuracy": 0.7160615402460099,
      "num_tokens": 43939124.0,
      "step": 425
    },
    {
      "epoch": 0.1367677228174151,
      "grad_norm": 1.0696874856948853,
      "learning_rate": 4.451582575994986e-06,
      "loss": 1.183,
      "mean_token_accuracy": 0.7253994250297546,
      "num_tokens": 46572312.0,
      "step": 450
    },
    {
      "epoch": 0.14436592964060482,
      "grad_norm": 1.0358718633651733,
      "learning_rate": 4.41240990285177e-06,
      "loss": 1.1931,
      "mean_token_accuracy": 0.7237947028875351,
      "num_tokens": 49064213.0,
      "step": 475
    },
    {
      "epoch": 0.15196413646379456,
      "grad_norm": 0.9893081784248352,
      "learning_rate": 4.373237229708556e-06,
      "loss": 1.1951,
      "mean_token_accuracy": 0.7238883310556412,
      "num_tokens": 51663643.0,
      "step": 500
    },
    {
      "epoch": 0.15956234328698427,
      "grad_norm": 1.0677660703659058,
      "learning_rate": 4.334064556565341e-06,
      "loss": 1.1721,
      "mean_token_accuracy": 0.7265544033050537,
      "num_tokens": 54161634.0,
      "step": 525
    },
    {
      "epoch": 0.167160550110174,
      "grad_norm": 0.8927258849143982,
      "learning_rate": 4.294891883422124e-06,
      "loss": 1.1685,
      "mean_token_accuracy": 0.7285602086782456,
      "num_tokens": 56784150.0,
      "step": 550
    },
    {
      "epoch": 0.17475875693336373,
      "grad_norm": 0.9111976027488708,
      "learning_rate": 4.25571921027891e-06,
      "loss": 1.1562,
      "mean_token_accuracy": 0.7312729161977768,
      "num_tokens": 59382430.0,
      "step": 575
    },
    {
      "epoch": 0.18235696375655344,
      "grad_norm": 0.8939260244369507,
      "learning_rate": 4.216546537135695e-06,
      "loss": 1.1818,
      "mean_token_accuracy": 0.7256826394796372,
      "num_tokens": 61928694.0,
      "step": 600
    },
    {
      "epoch": 0.18995517057974318,
      "grad_norm": 0.9611142873764038,
      "learning_rate": 4.177373863992479e-06,
      "loss": 1.1793,
      "mean_token_accuracy": 0.7260627967119216,
      "num_tokens": 64496365.0,
      "step": 625
    },
    {
      "epoch": 0.1975533774029329,
      "grad_norm": 0.9116730690002441,
      "learning_rate": 4.138201190849264e-06,
      "loss": 1.149,
      "mean_token_accuracy": 0.7322022247314454,
      "num_tokens": 67108712.0,
      "step": 650
    },
    {
      "epoch": 0.20515158422612265,
      "grad_norm": 0.9878592491149902,
      "learning_rate": 4.099028517706049e-06,
      "loss": 1.1509,
      "mean_token_accuracy": 0.7325534474849701,
      "num_tokens": 69702021.0,
      "step": 675
    },
    {
      "epoch": 0.21274979104931235,
      "grad_norm": 0.9378741979598999,
      "learning_rate": 4.059855844562833e-06,
      "loss": 1.1801,
      "mean_token_accuracy": 0.727109580039978,
      "num_tokens": 72301699.0,
      "step": 700
    },
    {
      "epoch": 0.2203479978725021,
      "grad_norm": 1.0023488998413086,
      "learning_rate": 4.020683171419618e-06,
      "loss": 1.1814,
      "mean_token_accuracy": 0.7265605771541596,
      "num_tokens": 74824515.0,
      "step": 725
    },
    {
      "epoch": 0.22794620469569182,
      "grad_norm": 0.9925772547721863,
      "learning_rate": 3.981510498276403e-06,
      "loss": 1.1932,
      "mean_token_accuracy": 0.7241325139999389,
      "num_tokens": 77468439.0,
      "step": 750
    },
    {
      "epoch": 0.23554441151888153,
      "grad_norm": 1.0404661893844604,
      "learning_rate": 3.942337825133187e-06,
      "loss": 1.1525,
      "mean_token_accuracy": 0.7301458239555358,
      "num_tokens": 80143332.0,
      "step": 775
    },
    {
      "epoch": 0.24314261834207127,
      "grad_norm": 0.9634000062942505,
      "learning_rate": 3.903165151989972e-06,
      "loss": 1.1691,
      "mean_token_accuracy": 0.7280007082223893,
      "num_tokens": 82731678.0,
      "step": 800
    },
    {
      "epoch": 0.25074082516526097,
      "grad_norm": 0.9083330631256104,
      "learning_rate": 3.863992478846757e-06,
      "loss": 1.1272,
      "mean_token_accuracy": 0.7353595513105392,
      "num_tokens": 85345821.0,
      "step": 825
    },
    {
      "epoch": 0.2583390319884507,
      "grad_norm": 0.9275352358818054,
      "learning_rate": 3.824819805703541e-06,
      "loss": 1.1558,
      "mean_token_accuracy": 0.7300055593252182,
      "num_tokens": 87948235.0,
      "step": 850
    },
    {
      "epoch": 0.26593723881164044,
      "grad_norm": 0.9232833385467529,
      "learning_rate": 3.785647132560326e-06,
      "loss": 1.162,
      "mean_token_accuracy": 0.7284415501356125,
      "num_tokens": 90574681.0,
      "step": 875
    },
    {
      "epoch": 0.2735354456348302,
      "grad_norm": 1.131739854812622,
      "learning_rate": 3.746474459417111e-06,
      "loss": 1.1697,
      "mean_token_accuracy": 0.7278369426727295,
      "num_tokens": 93215297.0,
      "step": 900
    },
    {
      "epoch": 0.2811336524580199,
      "grad_norm": 0.9522098898887634,
      "learning_rate": 3.7073017862738957e-06,
      "loss": 1.1846,
      "mean_token_accuracy": 0.7259439510107041,
      "num_tokens": 95791861.0,
      "step": 925
    },
    {
      "epoch": 0.28873185928120965,
      "grad_norm": 1.0696117877960205,
      "learning_rate": 3.66812911313068e-06,
      "loss": 1.1394,
      "mean_token_accuracy": 0.733702262043953,
      "num_tokens": 98410562.0,
      "step": 950
    },
    {
      "epoch": 0.2963300661043994,
      "grad_norm": 0.9875262379646301,
      "learning_rate": 3.628956439987465e-06,
      "loss": 1.1214,
      "mean_token_accuracy": 0.7367280977964401,
      "num_tokens": 101018834.0,
      "step": 975
    },
    {
      "epoch": 0.3039282729275891,
      "grad_norm": 1.0046495199203491,
      "learning_rate": 3.5897837668442497e-06,
      "loss": 1.1471,
      "mean_token_accuracy": 0.7323124688863755,
      "num_tokens": 103548660.0,
      "step": 1000
    },
    {
      "epoch": 0.3115264797507788,
      "grad_norm": 0.9220812320709229,
      "learning_rate": 3.550611093701035e-06,
      "loss": 1.1144,
      "mean_token_accuracy": 0.7385808283090591,
      "num_tokens": 106144886.0,
      "step": 1025
    },
    {
      "epoch": 0.31912468657396853,
      "grad_norm": 0.84840327501297,
      "learning_rate": 3.511438420557819e-06,
      "loss": 1.1292,
      "mean_token_accuracy": 0.7352547180652619,
      "num_tokens": 108737841.0,
      "step": 1050
    },
    {
      "epoch": 0.32672289339715826,
      "grad_norm": 0.9784463047981262,
      "learning_rate": 3.4722657474146037e-06,
      "loss": 1.1397,
      "mean_token_accuracy": 0.7329702425003052,
      "num_tokens": 111263726.0,
      "step": 1075
    },
    {
      "epoch": 0.334321100220348,
      "grad_norm": 0.9851428270339966,
      "learning_rate": 3.433093074271389e-06,
      "loss": 1.1522,
      "mean_token_accuracy": 0.7319200646877289,
      "num_tokens": 113862710.0,
      "step": 1100
    },
    {
      "epoch": 0.34191930704353773,
      "grad_norm": 1.0207332372665405,
      "learning_rate": 3.3939204011281735e-06,
      "loss": 1.1596,
      "mean_token_accuracy": 0.7311873000860214,
      "num_tokens": 116455029.0,
      "step": 1125
    },
    {
      "epoch": 0.34951751386672747,
      "grad_norm": 1.057591199874878,
      "learning_rate": 3.3547477279849577e-06,
      "loss": 1.1469,
      "mean_token_accuracy": 0.7317946165800094,
      "num_tokens": 119016227.0,
      "step": 1150
    },
    {
      "epoch": 0.3571157206899172,
      "grad_norm": 0.8993873596191406,
      "learning_rate": 3.3155750548417424e-06,
      "loss": 1.166,
      "mean_token_accuracy": 0.7290427106618881,
      "num_tokens": 121623537.0,
      "step": 1175
    },
    {
      "epoch": 0.3647139275131069,
      "grad_norm": 0.9834045171737671,
      "learning_rate": 3.2764023816985275e-06,
      "loss": 1.1433,
      "mean_token_accuracy": 0.7334721457958221,
      "num_tokens": 124161994.0,
      "step": 1200
    },
    {
      "epoch": 0.3723121343362966,
      "grad_norm": 0.9397407174110413,
      "learning_rate": 3.237229708555312e-06,
      "loss": 1.1613,
      "mean_token_accuracy": 0.7288734668493271,
      "num_tokens": 126771313.0,
      "step": 1225
    },
    {
      "epoch": 0.37991034115948635,
      "grad_norm": 0.8688826560974121,
      "learning_rate": 3.1980570354120964e-06,
      "loss": 1.1314,
      "mean_token_accuracy": 0.7348222607374191,
      "num_tokens": 129372256.0,
      "step": 1250
    },
    {
      "epoch": 0.3875085479826761,
      "grad_norm": 0.9596332311630249,
      "learning_rate": 3.1588843622688815e-06,
      "loss": 1.1771,
      "mean_token_accuracy": 0.7289222145080566,
      "num_tokens": 132004952.0,
      "step": 1275
    },
    {
      "epoch": 0.3951067548058658,
      "grad_norm": 0.8962276577949524,
      "learning_rate": 3.119711689125666e-06,
      "loss": 1.13,
      "mean_token_accuracy": 0.7361785328388214,
      "num_tokens": 134597308.0,
      "step": 1300
    },
    {
      "epoch": 0.40270496162905556,
      "grad_norm": 0.9526214599609375,
      "learning_rate": 3.0805390159824512e-06,
      "loss": 1.1279,
      "mean_token_accuracy": 0.7370401775836944,
      "num_tokens": 137154477.0,
      "step": 1325
    },
    {
      "epoch": 0.4103031684522453,
      "grad_norm": 0.9959564208984375,
      "learning_rate": 3.0413663428392355e-06,
      "loss": 1.1501,
      "mean_token_accuracy": 0.7321191453933715,
      "num_tokens": 139811600.0,
      "step": 1350
    },
    {
      "epoch": 0.41790137527543497,
      "grad_norm": 0.8501905798912048,
      "learning_rate": 3.00219366969602e-06,
      "loss": 1.1509,
      "mean_token_accuracy": 0.7308077716827392,
      "num_tokens": 142453993.0,
      "step": 1375
    },
    {
      "epoch": 0.4254995820986247,
      "grad_norm": 0.913529098033905,
      "learning_rate": 2.9630209965528052e-06,
      "loss": 1.1338,
      "mean_token_accuracy": 0.7351639837026596,
      "num_tokens": 144957196.0,
      "step": 1400
    },
    {
      "epoch": 0.43309778892181444,
      "grad_norm": 0.9400737285614014,
      "learning_rate": 2.92384832340959e-06,
      "loss": 1.1322,
      "mean_token_accuracy": 0.7360527998209,
      "num_tokens": 147574322.0,
      "step": 1425
    },
    {
      "epoch": 0.4406959957450042,
      "grad_norm": 0.9140069484710693,
      "learning_rate": 2.884675650266374e-06,
      "loss": 1.1359,
      "mean_token_accuracy": 0.7348436897993088,
      "num_tokens": 150085697.0,
      "step": 1450
    },
    {
      "epoch": 0.4482942025681939,
      "grad_norm": 0.9307470321655273,
      "learning_rate": 2.8455029771231592e-06,
      "loss": 1.1537,
      "mean_token_accuracy": 0.7332108038663864,
      "num_tokens": 152693678.0,
      "step": 1475
    },
    {
      "epoch": 0.45589240939138365,
      "grad_norm": 0.9836713671684265,
      "learning_rate": 2.806330303979944e-06,
      "loss": 1.1398,
      "mean_token_accuracy": 0.7350041055679322,
      "num_tokens": 155307631.0,
      "step": 1500
    },
    {
      "epoch": 0.4634906162145734,
      "grad_norm": 0.9581474661827087,
      "learning_rate": 2.7671576308367286e-06,
      "loss": 1.1213,
      "mean_token_accuracy": 0.7367819517850875,
      "num_tokens": 157931564.0,
      "step": 1525
    },
    {
      "epoch": 0.47108882303776306,
      "grad_norm": 0.9196196794509888,
      "learning_rate": 2.727984957693513e-06,
      "loss": 1.1416,
      "mean_token_accuracy": 0.7337223035097122,
      "num_tokens": 160523557.0,
      "step": 1550
    },
    {
      "epoch": 0.4786870298609528,
      "grad_norm": 1.0535234212875366,
      "learning_rate": 2.688812284550298e-06,
      "loss": 1.1376,
      "mean_token_accuracy": 0.7336768537759781,
      "num_tokens": 163117027.0,
      "step": 1575
    },
    {
      "epoch": 0.48628523668414253,
      "grad_norm": 0.9108763337135315,
      "learning_rate": 2.6496396114070826e-06,
      "loss": 1.1186,
      "mean_token_accuracy": 0.7363489520549774,
      "num_tokens": 165577284.0,
      "step": 1600
    },
    {
      "epoch": 0.49388344350733226,
      "grad_norm": 0.914046585559845,
      "learning_rate": 2.6104669382638677e-06,
      "loss": 1.1193,
      "mean_token_accuracy": 0.7358357053995133,
      "num_tokens": 168176206.0,
      "step": 1625
    },
    {
      "epoch": 0.5014816503305219,
      "grad_norm": 0.9301220774650574,
      "learning_rate": 2.571294265120652e-06,
      "loss": 1.1359,
      "mean_token_accuracy": 0.7337970972061157,
      "num_tokens": 170860276.0,
      "step": 1650
    },
    {
      "epoch": 0.5090798571537117,
      "grad_norm": 1.1479674577713013,
      "learning_rate": 2.5321215919774366e-06,
      "loss": 1.1339,
      "mean_token_accuracy": 0.7356235873699188,
      "num_tokens": 173397712.0,
      "step": 1675
    },
    {
      "epoch": 0.5166780639769014,
      "grad_norm": 0.9349409937858582,
      "learning_rate": 2.4929489188342217e-06,
      "loss": 1.1381,
      "mean_token_accuracy": 0.7338095015287399,
      "num_tokens": 175907043.0,
      "step": 1700
    },
    {
      "epoch": 0.5242762708000912,
      "grad_norm": 0.9135065078735352,
      "learning_rate": 2.453776245691006e-06,
      "loss": 1.1424,
      "mean_token_accuracy": 0.7341409718990326,
      "num_tokens": 178454221.0,
      "step": 1725
    },
    {
      "epoch": 0.5318744776232809,
      "grad_norm": 0.8928455710411072,
      "learning_rate": 2.414603572547791e-06,
      "loss": 1.1486,
      "mean_token_accuracy": 0.7311108547449112,
      "num_tokens": 181045005.0,
      "step": 1750
    },
    {
      "epoch": 0.5394726844464707,
      "grad_norm": 0.980609118938446,
      "learning_rate": 2.3754308994045757e-06,
      "loss": 1.1368,
      "mean_token_accuracy": 0.7330597722530365,
      "num_tokens": 183666329.0,
      "step": 1775
    },
    {
      "epoch": 0.5470708912696604,
      "grad_norm": 0.9772244691848755,
      "learning_rate": 2.3362582262613603e-06,
      "loss": 1.1105,
      "mean_token_accuracy": 0.7395724445581436,
      "num_tokens": 186221096.0,
      "step": 1800
    },
    {
      "epoch": 0.55466909809285,
      "grad_norm": 0.9453775882720947,
      "learning_rate": 2.297085553118145e-06,
      "loss": 1.1207,
      "mean_token_accuracy": 0.737914999127388,
      "num_tokens": 188781580.0,
      "step": 1825
    },
    {
      "epoch": 0.5622673049160398,
      "grad_norm": 0.9014413952827454,
      "learning_rate": 2.2579128799749297e-06,
      "loss": 1.1436,
      "mean_token_accuracy": 0.7317037135362625,
      "num_tokens": 191380677.0,
      "step": 1850
    },
    {
      "epoch": 0.5698655117392295,
      "grad_norm": 0.8946366310119629,
      "learning_rate": 2.2187402068317143e-06,
      "loss": 1.1548,
      "mean_token_accuracy": 0.7314022815227509,
      "num_tokens": 193996266.0,
      "step": 1875
    },
    {
      "epoch": 0.5774637185624193,
      "grad_norm": 0.9767251014709473,
      "learning_rate": 2.179567533688499e-06,
      "loss": 1.1258,
      "mean_token_accuracy": 0.737195520401001,
      "num_tokens": 196548577.0,
      "step": 1900
    },
    {
      "epoch": 0.585061925385609,
      "grad_norm": 1.0095189809799194,
      "learning_rate": 2.1403948605452837e-06,
      "loss": 1.1481,
      "mean_token_accuracy": 0.7341015815734864,
      "num_tokens": 199121126.0,
      "step": 1925
    },
    {
      "epoch": 0.5926601322087988,
      "grad_norm": 0.9052937030792236,
      "learning_rate": 2.1012221874020688e-06,
      "loss": 1.1309,
      "mean_token_accuracy": 0.7348120081424713,
      "num_tokens": 201731098.0,
      "step": 1950
    },
    {
      "epoch": 0.6002583390319884,
      "grad_norm": 0.9695770144462585,
      "learning_rate": 2.062049514258853e-06,
      "loss": 1.1248,
      "mean_token_accuracy": 0.7372716355323792,
      "num_tokens": 204324674.0,
      "step": 1975
    },
    {
      "epoch": 0.6078565458551782,
      "grad_norm": 0.9187005758285522,
      "learning_rate": 2.022876841115638e-06,
      "loss": 1.1275,
      "mean_token_accuracy": 0.7356339997053146,
      "num_tokens": 206937271.0,
      "step": 2000
    },
    {
      "epoch": 0.6154547526783679,
      "grad_norm": 0.9396357536315918,
      "learning_rate": 1.9837041679724223e-06,
      "loss": 1.1475,
      "mean_token_accuracy": 0.7320456486940384,
      "num_tokens": 209530276.0,
      "step": 2025
    },
    {
      "epoch": 0.6230529595015576,
      "grad_norm": 0.9308034181594849,
      "learning_rate": 1.9445314948292074e-06,
      "loss": 1.107,
      "mean_token_accuracy": 0.7407030069828033,
      "num_tokens": 212127023.0,
      "step": 2050
    },
    {
      "epoch": 0.6306511663247474,
      "grad_norm": 0.9127600193023682,
      "learning_rate": 1.9053588216859919e-06,
      "loss": 1.117,
      "mean_token_accuracy": 0.7379663151502609,
      "num_tokens": 214698253.0,
      "step": 2075
    },
    {
      "epoch": 0.6382493731479371,
      "grad_norm": 1.0522152185440063,
      "learning_rate": 1.8661861485427768e-06,
      "loss": 1.1134,
      "mean_token_accuracy": 0.738114013671875,
      "num_tokens": 217256660.0,
      "step": 2100
    },
    {
      "epoch": 0.6458475799711269,
      "grad_norm": 1.1676892042160034,
      "learning_rate": 1.8270134753995614e-06,
      "loss": 1.1501,
      "mean_token_accuracy": 0.7336319923400879,
      "num_tokens": 219876024.0,
      "step": 2125
    },
    {
      "epoch": 0.6534457867943165,
      "grad_norm": 0.950326144695282,
      "learning_rate": 1.7878408022563463e-06,
      "loss": 1.1027,
      "mean_token_accuracy": 0.7408947384357453,
      "num_tokens": 222494174.0,
      "step": 2150
    },
    {
      "epoch": 0.6610439936175063,
      "grad_norm": 0.9229258298873901,
      "learning_rate": 1.7486681291131308e-06,
      "loss": 1.149,
      "mean_token_accuracy": 0.7321878397464752,
      "num_tokens": 225084016.0,
      "step": 2175
    },
    {
      "epoch": 0.668642200440696,
      "grad_norm": 0.8653574585914612,
      "learning_rate": 1.7094954559699156e-06,
      "loss": 1.1264,
      "mean_token_accuracy": 0.7363163530826569,
      "num_tokens": 227745579.0,
      "step": 2200
    },
    {
      "epoch": 0.6762404072638857,
      "grad_norm": 0.8321031332015991,
      "learning_rate": 1.6703227828267e-06,
      "loss": 1.1262,
      "mean_token_accuracy": 0.7373967486619949,
      "num_tokens": 230365604.0,
      "step": 2225
    },
    {
      "epoch": 0.6838386140870755,
      "grad_norm": 0.9620904326438904,
      "learning_rate": 1.631150109683485e-06,
      "loss": 1.104,
      "mean_token_accuracy": 0.7394238811731338,
      "num_tokens": 232943997.0,
      "step": 2250
    },
    {
      "epoch": 0.6914368209102651,
      "grad_norm": 0.9633401036262512,
      "learning_rate": 1.5919774365402697e-06,
      "loss": 1.1312,
      "mean_token_accuracy": 0.7345698297023773,
      "num_tokens": 235514397.0,
      "step": 2275
    },
    {
      "epoch": 0.6990350277334549,
      "grad_norm": 1.049621343612671,
      "learning_rate": 1.5528047633970545e-06,
      "loss": 1.1038,
      "mean_token_accuracy": 0.7410222667455674,
      "num_tokens": 237958000.0,
      "step": 2300
    },
    {
      "epoch": 0.7066332345566446,
      "grad_norm": 0.8942002654075623,
      "learning_rate": 1.513632090253839e-06,
      "loss": 1.128,
      "mean_token_accuracy": 0.7355525487661362,
      "num_tokens": 240563561.0,
      "step": 2325
    },
    {
      "epoch": 0.7142314413798344,
      "grad_norm": 1.0083601474761963,
      "learning_rate": 1.4744594171106239e-06,
      "loss": 1.1763,
      "mean_token_accuracy": 0.7265212672948838,
      "num_tokens": 243194983.0,
      "step": 2350
    },
    {
      "epoch": 0.7218296482030241,
      "grad_norm": 0.8960743546485901,
      "learning_rate": 1.4352867439674083e-06,
      "loss": 1.1586,
      "mean_token_accuracy": 0.7288067770004273,
      "num_tokens": 245824173.0,
      "step": 2375
    },
    {
      "epoch": 0.7294278550262138,
      "grad_norm": 0.9686855673789978,
      "learning_rate": 1.3961140708241932e-06,
      "loss": 1.1404,
      "mean_token_accuracy": 0.732091948390007,
      "num_tokens": 248428880.0,
      "step": 2400
    },
    {
      "epoch": 0.7370260618494036,
      "grad_norm": 0.8258799910545349,
      "learning_rate": 1.3569413976809779e-06,
      "loss": 1.1119,
      "mean_token_accuracy": 0.7375982666015625,
      "num_tokens": 251034901.0,
      "step": 2425
    },
    {
      "epoch": 0.7446242686725932,
      "grad_norm": 0.9364578127861023,
      "learning_rate": 1.3177687245377627e-06,
      "loss": 1.162,
      "mean_token_accuracy": 0.7280535787343979,
      "num_tokens": 253656324.0,
      "step": 2450
    },
    {
      "epoch": 0.752222475495783,
      "grad_norm": 0.8600585460662842,
      "learning_rate": 1.2785960513945472e-06,
      "loss": 1.0984,
      "mean_token_accuracy": 0.7421051144599915,
      "num_tokens": 256255282.0,
      "step": 2475
    },
    {
      "epoch": 0.7598206823189727,
      "grad_norm": 0.955594003200531,
      "learning_rate": 1.2394233782513319e-06,
      "loss": 1.1248,
      "mean_token_accuracy": 0.7350970155000687,
      "num_tokens": 258805575.0,
      "step": 2500
    },
    {
      "epoch": 0.7674188891421625,
      "grad_norm": 0.8654617667198181,
      "learning_rate": 1.2002507051081167e-06,
      "loss": 1.139,
      "mean_token_accuracy": 0.7342792183160782,
      "num_tokens": 261394940.0,
      "step": 2525
    },
    {
      "epoch": 0.7750170959653522,
      "grad_norm": 0.9037804007530212,
      "learning_rate": 1.1610780319649014e-06,
      "loss": 1.1507,
      "mean_token_accuracy": 0.731409004330635,
      "num_tokens": 264013243.0,
      "step": 2550
    },
    {
      "epoch": 0.7826153027885419,
      "grad_norm": 0.9133325815200806,
      "learning_rate": 1.121905358821686e-06,
      "loss": 1.097,
      "mean_token_accuracy": 0.7419029778242111,
      "num_tokens": 266628788.0,
      "step": 2575
    },
    {
      "epoch": 0.7902135096117316,
      "grad_norm": 0.960132360458374,
      "learning_rate": 1.0827326856784708e-06,
      "loss": 1.1012,
      "mean_token_accuracy": 0.7402530944347382,
      "num_tokens": 269174466.0,
      "step": 2600
    },
    {
      "epoch": 0.7978117164349213,
      "grad_norm": 1.2616750001907349,
      "learning_rate": 1.0435600125352554e-06,
      "loss": 1.1035,
      "mean_token_accuracy": 0.7417422020435334,
      "num_tokens": 271735962.0,
      "step": 2625
    },
    {
      "epoch": 0.8054099232581111,
      "grad_norm": 0.9597173929214478,
      "learning_rate": 1.00438733939204e-06,
      "loss": 1.0966,
      "mean_token_accuracy": 0.7435225594043732,
      "num_tokens": 274392874.0,
      "step": 2650
    },
    {
      "epoch": 0.8130081300813008,
      "grad_norm": 0.8417394161224365,
      "learning_rate": 9.65214666248825e-07,
      "loss": 1.1347,
      "mean_token_accuracy": 0.7362286591529846,
      "num_tokens": 276982423.0,
      "step": 2675
    },
    {
      "epoch": 0.8206063369044906,
      "grad_norm": 0.8907487392425537,
      "learning_rate": 9.260419931056095e-07,
      "loss": 1.1112,
      "mean_token_accuracy": 0.7379048210382462,
      "num_tokens": 279629729.0,
      "step": 2700
    },
    {
      "epoch": 0.8282045437276803,
      "grad_norm": 0.9732270240783691,
      "learning_rate": 8.868693199623943e-07,
      "loss": 1.1219,
      "mean_token_accuracy": 0.7383491581678391,
      "num_tokens": 282200981.0,
      "step": 2725
    },
    {
      "epoch": 0.8358027505508699,
      "grad_norm": 0.8799415230751038,
      "learning_rate": 8.47696646819179e-07,
      "loss": 1.1059,
      "mean_token_accuracy": 0.7389830541610718,
      "num_tokens": 284699697.0,
      "step": 2750
    },
    {
      "epoch": 0.8434009573740597,
      "grad_norm": 0.8372652530670166,
      "learning_rate": 8.085239736759637e-07,
      "loss": 1.123,
      "mean_token_accuracy": 0.7350180590152741,
      "num_tokens": 287274170.0,
      "step": 2775
    },
    {
      "epoch": 0.8509991641972494,
      "grad_norm": 0.8474750518798828,
      "learning_rate": 7.693513005327484e-07,
      "loss": 1.1316,
      "mean_token_accuracy": 0.735204011797905,
      "num_tokens": 289905267.0,
      "step": 2800
    },
    {
      "epoch": 0.8585973710204392,
      "grad_norm": 0.9228349924087524,
      "learning_rate": 7.301786273895331e-07,
      "loss": 1.1276,
      "mean_token_accuracy": 0.7360332900285721,
      "num_tokens": 292531690.0,
      "step": 2825
    },
    {
      "epoch": 0.8661955778436289,
      "grad_norm": 0.9425334334373474,
      "learning_rate": 6.910059542463178e-07,
      "loss": 1.1446,
      "mean_token_accuracy": 0.7327738231420518,
      "num_tokens": 295102204.0,
      "step": 2850
    },
    {
      "epoch": 0.8737937846668187,
      "grad_norm": 0.8812386393547058,
      "learning_rate": 6.518332811031025e-07,
      "loss": 1.1197,
      "mean_token_accuracy": 0.7385855436325073,
      "num_tokens": 297696866.0,
      "step": 2875
    },
    {
      "epoch": 0.8813919914900084,
      "grad_norm": 0.8696539998054504,
      "learning_rate": 6.126606079598872e-07,
      "loss": 1.1084,
      "mean_token_accuracy": 0.740631007552147,
      "num_tokens": 300294510.0,
      "step": 2900
    },
    {
      "epoch": 0.888990198313198,
      "grad_norm": 0.9522122144699097,
      "learning_rate": 5.73487934816672e-07,
      "loss": 1.1582,
      "mean_token_accuracy": 0.7323483002185821,
      "num_tokens": 302898731.0,
      "step": 2925
    },
    {
      "epoch": 0.8965884051363878,
      "grad_norm": 0.9082636833190918,
      "learning_rate": 5.343152616734566e-07,
      "loss": 1.1264,
      "mean_token_accuracy": 0.7359338957071304,
      "num_tokens": 305410011.0,
      "step": 2950
    },
    {
      "epoch": 0.9041866119595775,
      "grad_norm": 0.9275860786437988,
      "learning_rate": 4.951425885302413e-07,
      "loss": 1.1528,
      "mean_token_accuracy": 0.7339082890748978,
      "num_tokens": 308110984.0,
      "step": 2975
    },
    {
      "epoch": 0.9117848187827673,
      "grad_norm": 1.0664831399917603,
      "learning_rate": 4.55969915387026e-07,
      "loss": 1.1404,
      "mean_token_accuracy": 0.7343163812160491,
      "num_tokens": 310767815.0,
      "step": 3000
    },
    {
      "epoch": 0.919383025605957,
      "grad_norm": 0.9530277252197266,
      "learning_rate": 4.1679724224381073e-07,
      "loss": 1.1293,
      "mean_token_accuracy": 0.7350982189178467,
      "num_tokens": 313289189.0,
      "step": 3025
    },
    {
      "epoch": 0.9269812324291468,
      "grad_norm": 0.8777811527252197,
      "learning_rate": 3.7762456910059545e-07,
      "loss": 1.1222,
      "mean_token_accuracy": 0.7371740919351578,
      "num_tokens": 315885578.0,
      "step": 3050
    },
    {
      "epoch": 0.9345794392523364,
      "grad_norm": 0.9783887267112732,
      "learning_rate": 3.3845189595738017e-07,
      "loss": 1.1018,
      "mean_token_accuracy": 0.7426222825050354,
      "num_tokens": 318443917.0,
      "step": 3075
    },
    {
      "epoch": 0.9421776460755261,
      "grad_norm": 0.8978294730186462,
      "learning_rate": 2.9927922281416484e-07,
      "loss": 1.1463,
      "mean_token_accuracy": 0.7316457951068878,
      "num_tokens": 321002620.0,
      "step": 3100
    },
    {
      "epoch": 0.9497758528987159,
      "grad_norm": 0.9735873937606812,
      "learning_rate": 2.6010654967094956e-07,
      "loss": 1.1119,
      "mean_token_accuracy": 0.7383992117643357,
      "num_tokens": 323561038.0,
      "step": 3125
    },
    {
      "epoch": 0.9573740597219056,
      "grad_norm": 0.8766360282897949,
      "learning_rate": 2.2093387652773425e-07,
      "loss": 1.0943,
      "mean_token_accuracy": 0.7416167676448822,
      "num_tokens": 326152504.0,
      "step": 3150
    },
    {
      "epoch": 0.9649722665450954,
      "grad_norm": 0.8869988322257996,
      "learning_rate": 1.8176120338451897e-07,
      "loss": 1.1099,
      "mean_token_accuracy": 0.7376412642002106,
      "num_tokens": 328738142.0,
      "step": 3175
    },
    {
      "epoch": 0.9725704733682851,
      "grad_norm": 0.9266097545623779,
      "learning_rate": 1.4258853024130367e-07,
      "loss": 1.1165,
      "mean_token_accuracy": 0.7394632256031036,
      "num_tokens": 331295732.0,
      "step": 3200
    },
    {
      "epoch": 0.9801686801914749,
      "grad_norm": 0.9964390993118286,
      "learning_rate": 1.0341585709808838e-07,
      "loss": 1.1191,
      "mean_token_accuracy": 0.7373811560869217,
      "num_tokens": 333891414.0,
      "step": 3225
    },
    {
      "epoch": 0.9877668870146645,
      "grad_norm": 0.9817653298377991,
      "learning_rate": 6.424318395487308e-08,
      "loss": 1.1356,
      "mean_token_accuracy": 0.7354251599311828,
      "num_tokens": 336538563.0,
      "step": 3250
    },
    {
      "epoch": 0.9953650938378543,
      "grad_norm": 0.8675805330276489,
      "learning_rate": 2.507051081165779e-08,
      "loss": 1.1482,
      "mean_token_accuracy": 0.7319550043344498,
      "num_tokens": 339216210.0,
      "step": 3275
    }
  ],
  "logging_steps": 25,
  "max_steps": 3290,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.8698621709430292e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}