| { |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 2.912, |
| "eval_steps": 156, |
| "global_step": 1092, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.0, |
| "learning_rate": 0.0001, |
| "loss": 1.4295, |
| "step": 1 |
| }, |
| { |
| "epoch": 0.01, |
| "learning_rate": 0.0002, |
| "loss": 1.3455, |
| "step": 2 |
| }, |
| { |
| "epoch": 0.01, |
| "learning_rate": 0.00019982190560997328, |
| "loss": 1.2167, |
| "step": 3 |
| }, |
| { |
| "epoch": 0.01, |
| "learning_rate": 0.00019964381121994658, |
| "loss": 1.306, |
| "step": 4 |
| }, |
| { |
| "epoch": 0.01, |
| "learning_rate": 0.00019946571682991985, |
| "loss": 1.1137, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.02, |
| "learning_rate": 0.00019928762243989317, |
| "loss": 0.9356, |
| "step": 6 |
| }, |
| { |
| "epoch": 0.02, |
| "learning_rate": 0.00019910952804986644, |
| "loss": 0.8762, |
| "step": 7 |
| }, |
| { |
| "epoch": 0.02, |
| "learning_rate": 0.00019893143365983974, |
| "loss": 1.2792, |
| "step": 8 |
| }, |
| { |
| "epoch": 0.02, |
| "learning_rate": 0.000198753339269813, |
| "loss": 0.9902, |
| "step": 9 |
| }, |
| { |
| "epoch": 0.03, |
| "learning_rate": 0.0001985752448797863, |
| "loss": 0.9053, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.03, |
| "learning_rate": 0.00019839715048975957, |
| "loss": 1.042, |
| "step": 11 |
| }, |
| { |
| "epoch": 0.03, |
| "learning_rate": 0.00019821905609973287, |
| "loss": 1.0665, |
| "step": 12 |
| }, |
| { |
| "epoch": 0.03, |
| "learning_rate": 0.00019804096170970617, |
| "loss": 0.9533, |
| "step": 13 |
| }, |
| { |
| "epoch": 0.04, |
| "learning_rate": 0.00019786286731967944, |
| "loss": 0.8653, |
| "step": 14 |
| }, |
| { |
| "epoch": 0.04, |
| "learning_rate": 0.00019768477292965274, |
| "loss": 0.749, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.04, |
| "learning_rate": 0.000197506678539626, |
| "loss": 0.8665, |
| "step": 16 |
| }, |
| { |
| "epoch": 0.05, |
| "learning_rate": 0.0001973285841495993, |
| "loss": 0.3864, |
| "step": 17 |
| }, |
| { |
| "epoch": 0.05, |
| "learning_rate": 0.00019715048975957257, |
| "loss": 0.9109, |
| "step": 18 |
| }, |
| { |
| "epoch": 0.05, |
| "learning_rate": 0.00019697239536954587, |
| "loss": 1.0354, |
| "step": 19 |
| }, |
| { |
| "epoch": 0.05, |
| "learning_rate": 0.00019679430097951917, |
| "loss": 1.1834, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.06, |
| "learning_rate": 0.00019661620658949244, |
| "loss": 0.8403, |
| "step": 21 |
| }, |
| { |
| "epoch": 0.06, |
| "learning_rate": 0.00019643811219946573, |
| "loss": 1.0325, |
| "step": 22 |
| }, |
| { |
| "epoch": 0.06, |
| "learning_rate": 0.000196260017809439, |
| "loss": 0.9493, |
| "step": 23 |
| }, |
| { |
| "epoch": 0.06, |
| "learning_rate": 0.0001960819234194123, |
| "loss": 0.9995, |
| "step": 24 |
| }, |
| { |
| "epoch": 0.07, |
| "learning_rate": 0.00019590382902938557, |
| "loss": 1.0611, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.07, |
| "learning_rate": 0.00019572573463935887, |
| "loss": 0.8022, |
| "step": 26 |
| }, |
| { |
| "epoch": 0.07, |
| "learning_rate": 0.00019554764024933216, |
| "loss": 0.3385, |
| "step": 27 |
| }, |
| { |
| "epoch": 0.07, |
| "learning_rate": 0.00019536954585930543, |
| "loss": 0.9965, |
| "step": 28 |
| }, |
| { |
| "epoch": 0.08, |
| "learning_rate": 0.00019519145146927873, |
| "loss": 0.9474, |
| "step": 29 |
| }, |
| { |
| "epoch": 0.08, |
| "learning_rate": 0.000195013357079252, |
| "loss": 0.9355, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.08, |
| "learning_rate": 0.0001948352626892253, |
| "loss": 0.786, |
| "step": 31 |
| }, |
| { |
| "epoch": 0.09, |
| "learning_rate": 0.00019465716829919857, |
| "loss": 0.9528, |
| "step": 32 |
| }, |
| { |
| "epoch": 0.09, |
| "learning_rate": 0.00019447907390917187, |
| "loss": 0.5179, |
| "step": 33 |
| }, |
| { |
| "epoch": 0.09, |
| "learning_rate": 0.00019430097951914516, |
| "loss": 1.0531, |
| "step": 34 |
| }, |
| { |
| "epoch": 0.09, |
| "learning_rate": 0.00019412288512911846, |
| "loss": 1.0949, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.1, |
| "learning_rate": 0.00019394479073909173, |
| "loss": 1.0082, |
| "step": 36 |
| }, |
| { |
| "epoch": 0.1, |
| "learning_rate": 0.000193766696349065, |
| "loss": 0.5266, |
| "step": 37 |
| }, |
| { |
| "epoch": 0.1, |
| "learning_rate": 0.0001935886019590383, |
| "loss": 0.9854, |
| "step": 38 |
| }, |
| { |
| "epoch": 0.1, |
| "learning_rate": 0.00019341050756901157, |
| "loss": 0.7773, |
| "step": 39 |
| }, |
| { |
| "epoch": 0.11, |
| "learning_rate": 0.00019323241317898486, |
| "loss": 0.675, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.11, |
| "learning_rate": 0.00019305431878895816, |
| "loss": 0.78, |
| "step": 41 |
| }, |
| { |
| "epoch": 0.11, |
| "learning_rate": 0.00019287622439893146, |
| "loss": 0.8928, |
| "step": 42 |
| }, |
| { |
| "epoch": 0.11, |
| "learning_rate": 0.00019269813000890473, |
| "loss": 0.8557, |
| "step": 43 |
| }, |
| { |
| "epoch": 0.12, |
| "learning_rate": 0.00019252003561887803, |
| "loss": 1.1408, |
| "step": 44 |
| }, |
| { |
| "epoch": 0.12, |
| "learning_rate": 0.0001923419412288513, |
| "loss": 0.3607, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.12, |
| "learning_rate": 0.0001921638468388246, |
| "loss": 0.4096, |
| "step": 46 |
| }, |
| { |
| "epoch": 0.13, |
| "learning_rate": 0.00019198575244879786, |
| "loss": 0.8893, |
| "step": 47 |
| }, |
| { |
| "epoch": 0.13, |
| "learning_rate": 0.00019180765805877116, |
| "loss": 0.7385, |
| "step": 48 |
| }, |
| { |
| "epoch": 0.13, |
| "learning_rate": 0.00019162956366874446, |
| "loss": 0.7721, |
| "step": 49 |
| }, |
| { |
| "epoch": 0.13, |
| "learning_rate": 0.00019145146927871773, |
| "loss": 0.8139, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.14, |
| "learning_rate": 0.00019127337488869102, |
| "loss": 0.8297, |
| "step": 51 |
| }, |
| { |
| "epoch": 0.14, |
| "learning_rate": 0.0001910952804986643, |
| "loss": 0.5069, |
| "step": 52 |
| }, |
| { |
| "epoch": 0.14, |
| "learning_rate": 0.0001909171861086376, |
| "loss": 0.8205, |
| "step": 53 |
| }, |
| { |
| "epoch": 0.14, |
| "learning_rate": 0.00019073909171861086, |
| "loss": 0.8154, |
| "step": 54 |
| }, |
| { |
| "epoch": 0.15, |
| "learning_rate": 0.00019056099732858416, |
| "loss": 0.7765, |
| "step": 55 |
| }, |
| { |
| "epoch": 0.15, |
| "learning_rate": 0.00019038290293855745, |
| "loss": 0.9416, |
| "step": 56 |
| }, |
| { |
| "epoch": 0.15, |
| "learning_rate": 0.00019020480854853072, |
| "loss": 0.3363, |
| "step": 57 |
| }, |
| { |
| "epoch": 0.15, |
| "learning_rate": 0.00019002671415850402, |
| "loss": 0.4339, |
| "step": 58 |
| }, |
| { |
| "epoch": 0.16, |
| "learning_rate": 0.0001898486197684773, |
| "loss": 0.8589, |
| "step": 59 |
| }, |
| { |
| "epoch": 0.16, |
| "learning_rate": 0.0001896705253784506, |
| "loss": 1.0326, |
| "step": 60 |
| }, |
| { |
| "epoch": 0.16, |
| "learning_rate": 0.00018949243098842386, |
| "loss": 0.8329, |
| "step": 61 |
| }, |
| { |
| "epoch": 0.17, |
| "learning_rate": 0.00018931433659839716, |
| "loss": 0.7575, |
| "step": 62 |
| }, |
| { |
| "epoch": 0.17, |
| "learning_rate": 0.00018913624220837045, |
| "loss": 0.4671, |
| "step": 63 |
| }, |
| { |
| "epoch": 0.17, |
| "learning_rate": 0.00018895814781834372, |
| "loss": 0.8195, |
| "step": 64 |
| }, |
| { |
| "epoch": 0.17, |
| "learning_rate": 0.00018878005342831702, |
| "loss": 0.7257, |
| "step": 65 |
| }, |
| { |
| "epoch": 0.18, |
| "learning_rate": 0.0001886019590382903, |
| "loss": 0.7636, |
| "step": 66 |
| }, |
| { |
| "epoch": 0.18, |
| "learning_rate": 0.00018842386464826359, |
| "loss": 0.8273, |
| "step": 67 |
| }, |
| { |
| "epoch": 0.18, |
| "learning_rate": 0.00018824577025823686, |
| "loss": 0.7633, |
| "step": 68 |
| }, |
| { |
| "epoch": 0.18, |
| "learning_rate": 0.00018806767586821018, |
| "loss": 0.8532, |
| "step": 69 |
| }, |
| { |
| "epoch": 0.19, |
| "learning_rate": 0.00018788958147818345, |
| "loss": 0.7717, |
| "step": 70 |
| }, |
| { |
| "epoch": 0.19, |
| "learning_rate": 0.00018771148708815675, |
| "loss": 0.8852, |
| "step": 71 |
| }, |
| { |
| "epoch": 0.19, |
| "learning_rate": 0.00018753339269813002, |
| "loss": 0.3202, |
| "step": 72 |
| }, |
| { |
| "epoch": 0.19, |
| "learning_rate": 0.0001873552983081033, |
| "loss": 0.6955, |
| "step": 73 |
| }, |
| { |
| "epoch": 0.2, |
| "learning_rate": 0.00018717720391807658, |
| "loss": 0.8806, |
| "step": 74 |
| }, |
| { |
| "epoch": 0.2, |
| "learning_rate": 0.00018699910952804985, |
| "loss": 0.3793, |
| "step": 75 |
| }, |
| { |
| "epoch": 0.2, |
| "learning_rate": 0.00018682101513802318, |
| "loss": 0.7618, |
| "step": 76 |
| }, |
| { |
| "epoch": 0.21, |
| "learning_rate": 0.00018664292074799645, |
| "loss": 0.8192, |
| "step": 77 |
| }, |
| { |
| "epoch": 0.21, |
| "learning_rate": 0.00018646482635796975, |
| "loss": 0.4192, |
| "step": 78 |
| }, |
| { |
| "epoch": 0.21, |
| "learning_rate": 0.00018628673196794302, |
| "loss": 0.8385, |
| "step": 79 |
| }, |
| { |
| "epoch": 0.21, |
| "learning_rate": 0.0001861086375779163, |
| "loss": 0.4327, |
| "step": 80 |
| }, |
| { |
| "epoch": 0.22, |
| "learning_rate": 0.00018593054318788958, |
| "loss": 0.8979, |
| "step": 81 |
| }, |
| { |
| "epoch": 0.22, |
| "learning_rate": 0.00018575244879786285, |
| "loss": 0.9529, |
| "step": 82 |
| }, |
| { |
| "epoch": 0.22, |
| "learning_rate": 0.00018557435440783618, |
| "loss": 0.8436, |
| "step": 83 |
| }, |
| { |
| "epoch": 0.22, |
| "learning_rate": 0.00018539626001780945, |
| "loss": 0.6772, |
| "step": 84 |
| }, |
| { |
| "epoch": 0.23, |
| "learning_rate": 0.00018521816562778274, |
| "loss": 0.7147, |
| "step": 85 |
| }, |
| { |
| "epoch": 0.23, |
| "learning_rate": 0.00018504007123775601, |
| "loss": 0.767, |
| "step": 86 |
| }, |
| { |
| "epoch": 0.23, |
| "learning_rate": 0.0001848619768477293, |
| "loss": 0.8866, |
| "step": 87 |
| }, |
| { |
| "epoch": 0.23, |
| "learning_rate": 0.00018468388245770258, |
| "loss": 0.8675, |
| "step": 88 |
| }, |
| { |
| "epoch": 0.24, |
| "learning_rate": 0.00018450578806767588, |
| "loss": 0.7725, |
| "step": 89 |
| }, |
| { |
| "epoch": 0.24, |
| "learning_rate": 0.00018432769367764917, |
| "loss": 0.9603, |
| "step": 90 |
| }, |
| { |
| "epoch": 0.24, |
| "learning_rate": 0.00018414959928762244, |
| "loss": 0.7294, |
| "step": 91 |
| }, |
| { |
| "epoch": 0.25, |
| "learning_rate": 0.00018397150489759574, |
| "loss": 0.7915, |
| "step": 92 |
| }, |
| { |
| "epoch": 0.25, |
| "learning_rate": 0.000183793410507569, |
| "loss": 0.8872, |
| "step": 93 |
| }, |
| { |
| "epoch": 0.25, |
| "learning_rate": 0.0001836153161175423, |
| "loss": 0.8274, |
| "step": 94 |
| }, |
| { |
| "epoch": 0.25, |
| "learning_rate": 0.00018343722172751558, |
| "loss": 0.8482, |
| "step": 95 |
| }, |
| { |
| "epoch": 0.26, |
| "learning_rate": 0.00018325912733748888, |
| "loss": 0.7448, |
| "step": 96 |
| }, |
| { |
| "epoch": 0.26, |
| "learning_rate": 0.00018308103294746217, |
| "loss": 0.7057, |
| "step": 97 |
| }, |
| { |
| "epoch": 0.26, |
| "learning_rate": 0.00018290293855743544, |
| "loss": 0.9329, |
| "step": 98 |
| }, |
| { |
| "epoch": 0.26, |
| "learning_rate": 0.00018272484416740874, |
| "loss": 0.8465, |
| "step": 99 |
| }, |
| { |
| "epoch": 0.27, |
| "learning_rate": 0.000182546749777382, |
| "loss": 0.391, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.27, |
| "learning_rate": 0.0001823686553873553, |
| "loss": 0.0, |
| "step": 101 |
| }, |
| { |
| "epoch": 0.27, |
| "learning_rate": 0.00018219056099732858, |
| "loss": 0.8871, |
| "step": 102 |
| }, |
| { |
| "epoch": 0.27, |
| "learning_rate": 0.00018201246660730187, |
| "loss": 0.7289, |
| "step": 103 |
| }, |
| { |
| "epoch": 0.28, |
| "learning_rate": 0.00018183437221727517, |
| "loss": 0.5009, |
| "step": 104 |
| }, |
| { |
| "epoch": 0.28, |
| "learning_rate": 0.00018165627782724847, |
| "loss": 0.6576, |
| "step": 105 |
| }, |
| { |
| "epoch": 0.28, |
| "learning_rate": 0.00018147818343722174, |
| "loss": 0.7306, |
| "step": 106 |
| }, |
| { |
| "epoch": 0.29, |
| "learning_rate": 0.000181300089047195, |
| "loss": 0.3679, |
| "step": 107 |
| }, |
| { |
| "epoch": 0.29, |
| "learning_rate": 0.0001811219946571683, |
| "loss": 0.7107, |
| "step": 108 |
| }, |
| { |
| "epoch": 0.29, |
| "learning_rate": 0.00018094390026714157, |
| "loss": 0.3577, |
| "step": 109 |
| }, |
| { |
| "epoch": 0.29, |
| "learning_rate": 0.00018076580587711487, |
| "loss": 0.8751, |
| "step": 110 |
| }, |
| { |
| "epoch": 0.3, |
| "learning_rate": 0.00018058771148708817, |
| "loss": 0.9237, |
| "step": 111 |
| }, |
| { |
| "epoch": 0.3, |
| "learning_rate": 0.00018040961709706147, |
| "loss": 0.8281, |
| "step": 112 |
| }, |
| { |
| "epoch": 0.3, |
| "learning_rate": 0.00018023152270703474, |
| "loss": 0.3885, |
| "step": 113 |
| }, |
| { |
| "epoch": 0.3, |
| "learning_rate": 0.00018005342831700803, |
| "loss": 0.826, |
| "step": 114 |
| }, |
| { |
| "epoch": 0.31, |
| "learning_rate": 0.0001798753339269813, |
| "loss": 0.7249, |
| "step": 115 |
| }, |
| { |
| "epoch": 0.31, |
| "learning_rate": 0.0001796972395369546, |
| "loss": 0.7137, |
| "step": 116 |
| }, |
| { |
| "epoch": 0.31, |
| "learning_rate": 0.00017951914514692787, |
| "loss": 0.4369, |
| "step": 117 |
| }, |
| { |
| "epoch": 0.31, |
| "learning_rate": 0.00017934105075690117, |
| "loss": 0.4071, |
| "step": 118 |
| }, |
| { |
| "epoch": 0.32, |
| "learning_rate": 0.00017916295636687446, |
| "loss": 0.9597, |
| "step": 119 |
| }, |
| { |
| "epoch": 0.32, |
| "learning_rate": 0.00017898486197684773, |
| "loss": 0.838, |
| "step": 120 |
| }, |
| { |
| "epoch": 0.32, |
| "learning_rate": 0.00017880676758682103, |
| "loss": 0.8103, |
| "step": 121 |
| }, |
| { |
| "epoch": 0.33, |
| "learning_rate": 0.0001786286731967943, |
| "loss": 0.7322, |
| "step": 122 |
| }, |
| { |
| "epoch": 0.33, |
| "learning_rate": 0.0001784505788067676, |
| "loss": 1.0234, |
| "step": 123 |
| }, |
| { |
| "epoch": 0.33, |
| "learning_rate": 0.00017827248441674087, |
| "loss": 0.8578, |
| "step": 124 |
| }, |
| { |
| "epoch": 0.33, |
| "learning_rate": 0.00017809439002671417, |
| "loss": 1.0393, |
| "step": 125 |
| }, |
| { |
| "epoch": 0.34, |
| "learning_rate": 0.00017791629563668746, |
| "loss": 0.8359, |
| "step": 126 |
| }, |
| { |
| "epoch": 0.34, |
| "learning_rate": 0.00017773820124666073, |
| "loss": 0.7979, |
| "step": 127 |
| }, |
| { |
| "epoch": 0.34, |
| "learning_rate": 0.00017756010685663403, |
| "loss": 0.7395, |
| "step": 128 |
| }, |
| { |
| "epoch": 0.34, |
| "learning_rate": 0.0001773820124666073, |
| "loss": 0.6665, |
| "step": 129 |
| }, |
| { |
| "epoch": 0.35, |
| "learning_rate": 0.0001772039180765806, |
| "loss": 0.8745, |
| "step": 130 |
| }, |
| { |
| "epoch": 0.35, |
| "learning_rate": 0.00017702582368655387, |
| "loss": 0.7065, |
| "step": 131 |
| }, |
| { |
| "epoch": 0.35, |
| "learning_rate": 0.00017684772929652716, |
| "loss": 0.3072, |
| "step": 132 |
| }, |
| { |
| "epoch": 0.35, |
| "learning_rate": 0.00017666963490650046, |
| "loss": 0.718, |
| "step": 133 |
| }, |
| { |
| "epoch": 0.36, |
| "learning_rate": 0.00017649154051647373, |
| "loss": 0.8404, |
| "step": 134 |
| }, |
| { |
| "epoch": 0.36, |
| "learning_rate": 0.00017631344612644703, |
| "loss": 0.7236, |
| "step": 135 |
| }, |
| { |
| "epoch": 0.36, |
| "learning_rate": 0.0001761353517364203, |
| "loss": 0.6529, |
| "step": 136 |
| }, |
| { |
| "epoch": 0.37, |
| "learning_rate": 0.0001759572573463936, |
| "loss": 0.4177, |
| "step": 137 |
| }, |
| { |
| "epoch": 0.37, |
| "learning_rate": 0.00017577916295636686, |
| "loss": 0.7227, |
| "step": 138 |
| }, |
| { |
| "epoch": 0.37, |
| "learning_rate": 0.0001756010685663402, |
| "loss": 0.8079, |
| "step": 139 |
| }, |
| { |
| "epoch": 0.37, |
| "learning_rate": 0.00017542297417631346, |
| "loss": 0.4759, |
| "step": 140 |
| }, |
| { |
| "epoch": 0.38, |
| "learning_rate": 0.00017524487978628676, |
| "loss": 0.6804, |
| "step": 141 |
| }, |
| { |
| "epoch": 0.38, |
| "learning_rate": 0.00017506678539626003, |
| "loss": 0.773, |
| "step": 142 |
| }, |
| { |
| "epoch": 0.38, |
| "learning_rate": 0.0001748886910062333, |
| "loss": 0.847, |
| "step": 143 |
| }, |
| { |
| "epoch": 0.38, |
| "learning_rate": 0.0001747105966162066, |
| "loss": 0.8355, |
| "step": 144 |
| }, |
| { |
| "epoch": 0.39, |
| "learning_rate": 0.0001745325022261799, |
| "loss": 0.8926, |
| "step": 145 |
| }, |
| { |
| "epoch": 0.39, |
| "learning_rate": 0.0001743544078361532, |
| "loss": 0.672, |
| "step": 146 |
| }, |
| { |
| "epoch": 0.39, |
| "learning_rate": 0.00017417631344612646, |
| "loss": 0.3974, |
| "step": 147 |
| }, |
| { |
| "epoch": 0.39, |
| "learning_rate": 0.00017399821905609975, |
| "loss": 0.6902, |
| "step": 148 |
| }, |
| { |
| "epoch": 0.4, |
| "learning_rate": 0.00017382012466607302, |
| "loss": 0.7488, |
| "step": 149 |
| }, |
| { |
| "epoch": 0.4, |
| "learning_rate": 0.00017364203027604632, |
| "loss": 0.6907, |
| "step": 150 |
| }, |
| { |
| "epoch": 0.4, |
| "learning_rate": 0.0001734639358860196, |
| "loss": 0.6857, |
| "step": 151 |
| }, |
| { |
| "epoch": 0.41, |
| "learning_rate": 0.0001732858414959929, |
| "loss": 0.7513, |
| "step": 152 |
| }, |
| { |
| "epoch": 0.41, |
| "learning_rate": 0.00017310774710596618, |
| "loss": 0.7948, |
| "step": 153 |
| }, |
| { |
| "epoch": 0.41, |
| "learning_rate": 0.00017292965271593945, |
| "loss": 0.8763, |
| "step": 154 |
| }, |
| { |
| "epoch": 0.41, |
| "learning_rate": 0.00017275155832591275, |
| "loss": 0.9951, |
| "step": 155 |
| }, |
| { |
| "epoch": 0.42, |
| "learning_rate": 0.00017257346393588602, |
| "loss": 0.7795, |
| "step": 156 |
| }, |
| { |
| "epoch": 0.42, |
| "eval_loss": 0.7492814064025879, |
| "eval_runtime": 3075.3295, |
| "eval_samples_per_second": 0.976, |
| "eval_steps_per_second": 0.061, |
| "step": 156 |
| }, |
| { |
| "epoch": 0.42, |
| "learning_rate": 0.00017239536954585932, |
| "loss": 0.7795, |
| "step": 157 |
| }, |
| { |
| "epoch": 0.42, |
| "learning_rate": 0.0001722172751558326, |
| "loss": 0.75, |
| "step": 158 |
| }, |
| { |
| "epoch": 0.42, |
| "learning_rate": 0.00017203918076580589, |
| "loss": 0.7877, |
| "step": 159 |
| }, |
| { |
| "epoch": 0.43, |
| "learning_rate": 0.00017186108637577918, |
| "loss": 0.9346, |
| "step": 160 |
| }, |
| { |
| "epoch": 0.43, |
| "learning_rate": 0.00017168299198575245, |
| "loss": 0.7944, |
| "step": 161 |
| }, |
| { |
| "epoch": 0.43, |
| "learning_rate": 0.00017150489759572575, |
| "loss": 0.7398, |
| "step": 162 |
| }, |
| { |
| "epoch": 0.43, |
| "learning_rate": 0.00017132680320569902, |
| "loss": 0.8405, |
| "step": 163 |
| }, |
| { |
| "epoch": 0.44, |
| "learning_rate": 0.00017114870881567232, |
| "loss": 0.7651, |
| "step": 164 |
| }, |
| { |
| "epoch": 0.44, |
| "learning_rate": 0.0001709706144256456, |
| "loss": 0.7257, |
| "step": 165 |
| }, |
| { |
| "epoch": 0.44, |
| "learning_rate": 0.00017079252003561888, |
| "loss": 0.7083, |
| "step": 166 |
| }, |
| { |
| "epoch": 0.45, |
| "learning_rate": 0.00017061442564559218, |
| "loss": 0.7598, |
| "step": 167 |
| }, |
| { |
| "epoch": 0.45, |
| "learning_rate": 0.00017043633125556545, |
| "loss": 0.7595, |
| "step": 168 |
| }, |
| { |
| "epoch": 0.45, |
| "learning_rate": 0.00017025823686553875, |
| "loss": 0.0, |
| "step": 169 |
| }, |
| { |
| "epoch": 0.45, |
| "learning_rate": 0.00017008014247551202, |
| "loss": 0.8517, |
| "step": 170 |
| }, |
| { |
| "epoch": 0.46, |
| "learning_rate": 0.00016990204808548531, |
| "loss": 0.6331, |
| "step": 171 |
| }, |
| { |
| "epoch": 0.46, |
| "learning_rate": 0.00016972395369545858, |
| "loss": 0.7086, |
| "step": 172 |
| }, |
| { |
| "epoch": 0.46, |
| "learning_rate": 0.0001695458593054319, |
| "loss": 0.3294, |
| "step": 173 |
| }, |
| { |
| "epoch": 0.46, |
| "learning_rate": 0.00016936776491540518, |
| "loss": 0.6783, |
| "step": 174 |
| }, |
| { |
| "epoch": 0.47, |
| "learning_rate": 0.00016918967052537848, |
| "loss": 0.7448, |
| "step": 175 |
| }, |
| { |
| "epoch": 0.47, |
| "learning_rate": 0.00016901157613535175, |
| "loss": 0.7255, |
| "step": 176 |
| }, |
| { |
| "epoch": 0.47, |
| "learning_rate": 0.00016883348174532502, |
| "loss": 0.8362, |
| "step": 177 |
| }, |
| { |
| "epoch": 0.47, |
| "learning_rate": 0.0001686553873552983, |
| "loss": 0.6382, |
| "step": 178 |
| }, |
| { |
| "epoch": 0.48, |
| "learning_rate": 0.00016847729296527158, |
| "loss": 0.8469, |
| "step": 179 |
| }, |
| { |
| "epoch": 0.48, |
| "learning_rate": 0.0001682991985752449, |
| "loss": 0.7967, |
| "step": 180 |
| }, |
| { |
| "epoch": 0.48, |
| "learning_rate": 0.00016812110418521818, |
| "loss": 0.7722, |
| "step": 181 |
| }, |
| { |
| "epoch": 0.49, |
| "learning_rate": 0.00016794300979519147, |
| "loss": 0.7976, |
| "step": 182 |
| }, |
| { |
| "epoch": 0.49, |
| "learning_rate": 0.00016776491540516474, |
| "loss": 0.6826, |
| "step": 183 |
| }, |
| { |
| "epoch": 0.49, |
| "learning_rate": 0.00016758682101513804, |
| "loss": 0.8447, |
| "step": 184 |
| }, |
| { |
| "epoch": 0.49, |
| "learning_rate": 0.0001674087266251113, |
| "loss": 0.652, |
| "step": 185 |
| }, |
| { |
| "epoch": 0.5, |
| "learning_rate": 0.0001672306322350846, |
| "loss": 0.8129, |
| "step": 186 |
| }, |
| { |
| "epoch": 0.5, |
| "learning_rate": 0.0001670525378450579, |
| "loss": 0.9252, |
| "step": 187 |
| }, |
| { |
| "epoch": 0.5, |
| "learning_rate": 0.00016687444345503117, |
| "loss": 0.411, |
| "step": 188 |
| }, |
| { |
| "epoch": 0.5, |
| "learning_rate": 0.00016669634906500447, |
| "loss": 0.789, |
| "step": 189 |
| }, |
| { |
| "epoch": 0.51, |
| "learning_rate": 0.00016651825467497774, |
| "loss": 0.7202, |
| "step": 190 |
| }, |
| { |
| "epoch": 0.51, |
| "learning_rate": 0.00016634016028495104, |
| "loss": 0.645, |
| "step": 191 |
| }, |
| { |
| "epoch": 0.51, |
| "learning_rate": 0.0001661620658949243, |
| "loss": 0.8303, |
| "step": 192 |
| }, |
| { |
| "epoch": 0.51, |
| "learning_rate": 0.0001659839715048976, |
| "loss": 0.8786, |
| "step": 193 |
| }, |
| { |
| "epoch": 0.52, |
| "learning_rate": 0.0001658058771148709, |
| "loss": 0.7888, |
| "step": 194 |
| }, |
| { |
| "epoch": 0.52, |
| "learning_rate": 0.00016562778272484417, |
| "loss": 0.8448, |
| "step": 195 |
| }, |
| { |
| "epoch": 0.52, |
| "learning_rate": 0.00016544968833481747, |
| "loss": 0.7375, |
| "step": 196 |
| }, |
| { |
| "epoch": 0.53, |
| "learning_rate": 0.00016527159394479074, |
| "loss": 0.3857, |
| "step": 197 |
| }, |
| { |
| "epoch": 0.53, |
| "learning_rate": 0.00016509349955476404, |
| "loss": 0.3874, |
| "step": 198 |
| }, |
| { |
| "epoch": 0.53, |
| "learning_rate": 0.0001649154051647373, |
| "loss": 0.6513, |
| "step": 199 |
| }, |
| { |
| "epoch": 0.53, |
| "learning_rate": 0.0001647373107747106, |
| "loss": 0.6204, |
| "step": 200 |
| }, |
| { |
| "epoch": 0.54, |
| "learning_rate": 0.0001645592163846839, |
| "loss": 0.7956, |
| "step": 201 |
| }, |
| { |
| "epoch": 0.54, |
| "learning_rate": 0.00016438112199465717, |
| "loss": 0.7612, |
| "step": 202 |
| }, |
| { |
| "epoch": 0.54, |
| "learning_rate": 0.00016420302760463047, |
| "loss": 0.8245, |
| "step": 203 |
| }, |
| { |
| "epoch": 0.54, |
| "learning_rate": 0.00016402493321460374, |
| "loss": 0.7008, |
| "step": 204 |
| }, |
| { |
| "epoch": 0.55, |
| "learning_rate": 0.00016384683882457704, |
| "loss": 0.7392, |
| "step": 205 |
| }, |
| { |
| "epoch": 0.55, |
| "learning_rate": 0.0001636687444345503, |
| "loss": 0.818, |
| "step": 206 |
| }, |
| { |
| "epoch": 0.55, |
| "learning_rate": 0.0001634906500445236, |
| "loss": 0.8982, |
| "step": 207 |
| }, |
| { |
| "epoch": 0.55, |
| "learning_rate": 0.0001633125556544969, |
| "loss": 0.7238, |
| "step": 208 |
| }, |
| { |
| "epoch": 0.56, |
| "learning_rate": 0.0001631344612644702, |
| "loss": 0.8638, |
| "step": 209 |
| }, |
| { |
| "epoch": 0.56, |
| "learning_rate": 0.00016295636687444347, |
| "loss": 0.8059, |
| "step": 210 |
| }, |
| { |
| "epoch": 0.56, |
| "learning_rate": 0.00016277827248441674, |
| "loss": 0.7457, |
| "step": 211 |
| }, |
| { |
| "epoch": 0.57, |
| "learning_rate": 0.00016260017809439003, |
| "loss": 0.6653, |
| "step": 212 |
| }, |
| { |
| "epoch": 0.57, |
| "learning_rate": 0.0001624220837043633, |
| "loss": 0.3836, |
| "step": 213 |
| }, |
| { |
| "epoch": 0.57, |
| "learning_rate": 0.0001622439893143366, |
| "loss": 0.7131, |
| "step": 214 |
| }, |
| { |
| "epoch": 0.57, |
| "learning_rate": 0.0001620658949243099, |
| "loss": 0.744, |
| "step": 215 |
| }, |
| { |
| "epoch": 0.58, |
| "learning_rate": 0.0001618878005342832, |
| "loss": 0.6956, |
| "step": 216 |
| }, |
| { |
| "epoch": 0.58, |
| "learning_rate": 0.00016170970614425646, |
| "loss": 0.881, |
| "step": 217 |
| }, |
| { |
| "epoch": 0.58, |
| "learning_rate": 0.00016153161175422976, |
| "loss": 0.8571, |
| "step": 218 |
| }, |
| { |
| "epoch": 0.58, |
| "learning_rate": 0.00016135351736420303, |
| "loss": 0.8178, |
| "step": 219 |
| }, |
| { |
| "epoch": 0.59, |
| "learning_rate": 0.00016117542297417633, |
| "loss": 0.6912, |
| "step": 220 |
| }, |
| { |
| "epoch": 0.59, |
| "learning_rate": 0.0001609973285841496, |
| "loss": 0.7055, |
| "step": 221 |
| }, |
| { |
| "epoch": 0.59, |
| "learning_rate": 0.0001608192341941229, |
| "loss": 0.7527, |
| "step": 222 |
| }, |
| { |
| "epoch": 0.59, |
| "learning_rate": 0.0001606411398040962, |
| "loss": 0.7784, |
| "step": 223 |
| }, |
| { |
| "epoch": 0.6, |
| "learning_rate": 0.00016046304541406946, |
| "loss": 0.759, |
| "step": 224 |
| }, |
| { |
| "epoch": 0.6, |
| "learning_rate": 0.00016028495102404276, |
| "loss": 0.9544, |
| "step": 225 |
| }, |
| { |
| "epoch": 0.6, |
| "learning_rate": 0.00016010685663401603, |
| "loss": 0.7427, |
| "step": 226 |
| }, |
| { |
| "epoch": 0.61, |
| "learning_rate": 0.00015992876224398933, |
| "loss": 0.612, |
| "step": 227 |
| }, |
| { |
| "epoch": 0.61, |
| "learning_rate": 0.0001597506678539626, |
| "loss": 0.7389, |
| "step": 228 |
| }, |
| { |
| "epoch": 0.61, |
| "learning_rate": 0.0001595725734639359, |
| "loss": 0.3279, |
| "step": 229 |
| }, |
| { |
| "epoch": 0.61, |
| "learning_rate": 0.0001593944790739092, |
| "loss": 0.7942, |
| "step": 230 |
| }, |
| { |
| "epoch": 0.62, |
| "learning_rate": 0.00015921638468388246, |
| "loss": 0.7038, |
| "step": 231 |
| }, |
| { |
| "epoch": 0.62, |
| "learning_rate": 0.00015903829029385576, |
| "loss": 0.9062, |
| "step": 232 |
| }, |
| { |
| "epoch": 0.62, |
| "learning_rate": 0.00015886019590382903, |
| "loss": 0.3263, |
| "step": 233 |
| }, |
| { |
| "epoch": 0.62, |
| "learning_rate": 0.00015868210151380232, |
| "loss": 0.783, |
| "step": 234 |
| }, |
| { |
| "epoch": 0.63, |
| "learning_rate": 0.0001585040071237756, |
| "loss": 0.7146, |
| "step": 235 |
| }, |
| { |
| "epoch": 0.63, |
| "learning_rate": 0.0001583259127337489, |
| "loss": 0.7713, |
| "step": 236 |
| }, |
| { |
| "epoch": 0.63, |
| "learning_rate": 0.0001581478183437222, |
| "loss": 0.8108, |
| "step": 237 |
| }, |
| { |
| "epoch": 0.63, |
| "learning_rate": 0.00015796972395369546, |
| "loss": 0.8813, |
| "step": 238 |
| }, |
| { |
| "epoch": 0.64, |
| "learning_rate": 0.00015779162956366876, |
| "loss": 0.8187, |
| "step": 239 |
| }, |
| { |
| "epoch": 0.64, |
| "learning_rate": 0.00015761353517364203, |
| "loss": 0.8153, |
| "step": 240 |
| }, |
| { |
| "epoch": 0.64, |
| "learning_rate": 0.00015743544078361532, |
| "loss": 0.7603, |
| "step": 241 |
| }, |
| { |
| "epoch": 0.65, |
| "learning_rate": 0.0001572573463935886, |
| "loss": 0.7647, |
| "step": 242 |
| }, |
| { |
| "epoch": 0.65, |
| "learning_rate": 0.00015707925200356192, |
| "loss": 0.0, |
| "step": 243 |
| }, |
| { |
| "epoch": 0.65, |
| "learning_rate": 0.0001569011576135352, |
| "loss": 0.7422, |
| "step": 244 |
| }, |
| { |
| "epoch": 0.65, |
| "learning_rate": 0.00015672306322350848, |
| "loss": 0.6771, |
| "step": 245 |
| }, |
| { |
| "epoch": 0.66, |
| "learning_rate": 0.00015654496883348175, |
| "loss": 0.6732, |
| "step": 246 |
| }, |
| { |
| "epoch": 0.66, |
| "learning_rate": 0.00015636687444345502, |
| "loss": 0.3087, |
| "step": 247 |
| }, |
| { |
| "epoch": 0.66, |
| "learning_rate": 0.00015618878005342832, |
| "loss": 0.3659, |
| "step": 248 |
| }, |
| { |
| "epoch": 0.66, |
| "learning_rate": 0.0001560106856634016, |
| "loss": 0.637, |
| "step": 249 |
| }, |
| { |
| "epoch": 0.67, |
| "learning_rate": 0.00015583259127337491, |
| "loss": 0.7033, |
| "step": 250 |
| }, |
| { |
| "epoch": 0.67, |
| "learning_rate": 0.00015565449688334818, |
| "loss": 0.7298, |
| "step": 251 |
| }, |
| { |
| "epoch": 0.67, |
| "learning_rate": 0.00015547640249332148, |
| "loss": 0.4897, |
| "step": 252 |
| }, |
| { |
| "epoch": 0.67, |
| "learning_rate": 0.00015529830810329475, |
| "loss": 0.7378, |
| "step": 253 |
| }, |
| { |
| "epoch": 0.68, |
| "learning_rate": 0.00015512021371326805, |
| "loss": 0.7567, |
| "step": 254 |
| }, |
| { |
| "epoch": 0.68, |
| "learning_rate": 0.00015494211932324132, |
| "loss": 0.709, |
| "step": 255 |
| }, |
| { |
| "epoch": 0.68, |
| "learning_rate": 0.0001547640249332146, |
| "loss": 0.6512, |
| "step": 256 |
| }, |
| { |
| "epoch": 0.69, |
| "learning_rate": 0.0001545859305431879, |
| "loss": 0.3269, |
| "step": 257 |
| }, |
| { |
| "epoch": 0.69, |
| "learning_rate": 0.00015440783615316118, |
| "loss": 0.7823, |
| "step": 258 |
| }, |
| { |
| "epoch": 0.69, |
| "learning_rate": 0.00015422974176313448, |
| "loss": 0.3131, |
| "step": 259 |
| }, |
| { |
| "epoch": 0.69, |
| "learning_rate": 0.00015405164737310775, |
| "loss": 0.7399, |
| "step": 260 |
| }, |
| { |
| "epoch": 0.7, |
| "learning_rate": 0.00015387355298308105, |
| "loss": 0.6869, |
| "step": 261 |
| }, |
| { |
| "epoch": 0.7, |
| "learning_rate": 0.00015369545859305432, |
| "loss": 0.6797, |
| "step": 262 |
| }, |
| { |
| "epoch": 0.7, |
| "learning_rate": 0.00015351736420302761, |
| "loss": 0.7374, |
| "step": 263 |
| }, |
| { |
| "epoch": 0.7, |
| "learning_rate": 0.0001533392698130009, |
| "loss": 0.6817, |
| "step": 264 |
| }, |
| { |
| "epoch": 0.71, |
| "learning_rate": 0.00015316117542297418, |
| "loss": 0.8483, |
| "step": 265 |
| }, |
| { |
| "epoch": 0.71, |
| "learning_rate": 0.00015298308103294748, |
| "loss": 0.7078, |
| "step": 266 |
| }, |
| { |
| "epoch": 0.71, |
| "learning_rate": 0.00015280498664292075, |
| "loss": 0.7772, |
| "step": 267 |
| }, |
| { |
| "epoch": 0.71, |
| "learning_rate": 0.00015262689225289405, |
| "loss": 0.3639, |
| "step": 268 |
| }, |
| { |
| "epoch": 0.72, |
| "learning_rate": 0.00015244879786286731, |
| "loss": 0.7033, |
| "step": 269 |
| }, |
| { |
| "epoch": 0.72, |
| "learning_rate": 0.0001522707034728406, |
| "loss": 0.7402, |
| "step": 270 |
| }, |
| { |
| "epoch": 0.72, |
| "learning_rate": 0.0001520926090828139, |
| "loss": 0.3391, |
| "step": 271 |
| }, |
| { |
| "epoch": 0.73, |
| "learning_rate": 0.00015191451469278718, |
| "loss": 0.8637, |
| "step": 272 |
| }, |
| { |
| "epoch": 0.73, |
| "learning_rate": 0.00015173642030276048, |
| "loss": 0.6696, |
| "step": 273 |
| }, |
| { |
| "epoch": 0.73, |
| "learning_rate": 0.00015155832591273375, |
| "loss": 0.7798, |
| "step": 274 |
| }, |
| { |
| "epoch": 0.73, |
| "learning_rate": 0.00015138023152270704, |
| "loss": 0.3043, |
| "step": 275 |
| }, |
| { |
| "epoch": 0.74, |
| "learning_rate": 0.0001512021371326803, |
| "loss": 0.2661, |
| "step": 276 |
| }, |
| { |
| "epoch": 0.74, |
| "learning_rate": 0.0001510240427426536, |
| "loss": 0.8184, |
| "step": 277 |
| }, |
| { |
| "epoch": 0.74, |
| "learning_rate": 0.0001508459483526269, |
| "loss": 0.0, |
| "step": 278 |
| }, |
| { |
| "epoch": 0.74, |
| "learning_rate": 0.0001506678539626002, |
| "loss": 0.3891, |
| "step": 279 |
| }, |
| { |
| "epoch": 0.75, |
| "learning_rate": 0.00015048975957257347, |
| "loss": 0.5484, |
| "step": 280 |
| }, |
| { |
| "epoch": 0.75, |
| "learning_rate": 0.00015031166518254674, |
| "loss": 0.5203, |
| "step": 281 |
| }, |
| { |
| "epoch": 0.75, |
| "learning_rate": 0.00015013357079252004, |
| "loss": 0.7173, |
| "step": 282 |
| }, |
| { |
| "epoch": 0.75, |
| "learning_rate": 0.0001499554764024933, |
| "loss": 0.7254, |
| "step": 283 |
| }, |
| { |
| "epoch": 0.76, |
| "learning_rate": 0.0001497773820124666, |
| "loss": 0.7333, |
| "step": 284 |
| }, |
| { |
| "epoch": 0.76, |
| "learning_rate": 0.0001495992876224399, |
| "loss": 0.821, |
| "step": 285 |
| }, |
| { |
| "epoch": 0.76, |
| "learning_rate": 0.0001494211932324132, |
| "loss": 0.8437, |
| "step": 286 |
| }, |
| { |
| "epoch": 0.77, |
| "learning_rate": 0.00014924309884238647, |
| "loss": 0.6812, |
| "step": 287 |
| }, |
| { |
| "epoch": 0.77, |
| "learning_rate": 0.00014906500445235977, |
| "loss": 0.4667, |
| "step": 288 |
| }, |
| { |
| "epoch": 0.77, |
| "learning_rate": 0.00014888691006233304, |
| "loss": 0.6531, |
| "step": 289 |
| }, |
| { |
| "epoch": 0.77, |
| "learning_rate": 0.00014870881567230634, |
| "loss": 0.7693, |
| "step": 290 |
| }, |
| { |
| "epoch": 0.78, |
| "learning_rate": 0.0001485307212822796, |
| "loss": 0.6915, |
| "step": 291 |
| }, |
| { |
| "epoch": 0.78, |
| "learning_rate": 0.0001483526268922529, |
| "loss": 0.3783, |
| "step": 292 |
| }, |
| { |
| "epoch": 0.78, |
| "learning_rate": 0.0001481745325022262, |
| "loss": 0.6953, |
| "step": 293 |
| }, |
| { |
| "epoch": 0.78, |
| "learning_rate": 0.00014799643811219947, |
| "loss": 0.6734, |
| "step": 294 |
| }, |
| { |
| "epoch": 0.79, |
| "learning_rate": 0.00014781834372217277, |
| "loss": 0.6847, |
| "step": 295 |
| }, |
| { |
| "epoch": 0.79, |
| "learning_rate": 0.00014764024933214604, |
| "loss": 0.8625, |
| "step": 296 |
| }, |
| { |
| "epoch": 0.79, |
| "learning_rate": 0.00014746215494211933, |
| "loss": 0.912, |
| "step": 297 |
| }, |
| { |
| "epoch": 0.79, |
| "learning_rate": 0.0001472840605520926, |
| "loss": 0.3658, |
| "step": 298 |
| }, |
| { |
| "epoch": 0.8, |
| "learning_rate": 0.0001471059661620659, |
| "loss": 0.6601, |
| "step": 299 |
| }, |
| { |
| "epoch": 0.8, |
| "learning_rate": 0.0001469278717720392, |
| "loss": 0.3216, |
| "step": 300 |
| }, |
| { |
| "epoch": 0.8, |
| "learning_rate": 0.00014674977738201247, |
| "loss": 0.7748, |
| "step": 301 |
| }, |
| { |
| "epoch": 0.81, |
| "learning_rate": 0.00014657168299198577, |
| "loss": 0.4081, |
| "step": 302 |
| }, |
| { |
| "epoch": 0.81, |
| "learning_rate": 0.00014639358860195904, |
| "loss": 0.488, |
| "step": 303 |
| }, |
| { |
| "epoch": 0.81, |
| "learning_rate": 0.00014621549421193233, |
| "loss": 0.7204, |
| "step": 304 |
| }, |
| { |
| "epoch": 0.81, |
| "learning_rate": 0.0001460373998219056, |
| "loss": 0.4449, |
| "step": 305 |
| }, |
| { |
| "epoch": 0.82, |
| "learning_rate": 0.0001458593054318789, |
| "loss": 0.4268, |
| "step": 306 |
| }, |
| { |
| "epoch": 0.82, |
| "learning_rate": 0.0001456812110418522, |
| "loss": 0.7598, |
| "step": 307 |
| }, |
| { |
| "epoch": 0.82, |
| "learning_rate": 0.00014550311665182547, |
| "loss": 0.3935, |
| "step": 308 |
| }, |
| { |
| "epoch": 0.82, |
| "learning_rate": 0.00014532502226179876, |
| "loss": 0.4371, |
| "step": 309 |
| }, |
| { |
| "epoch": 0.83, |
| "learning_rate": 0.00014514692787177203, |
| "loss": 0.6249, |
| "step": 310 |
| }, |
| { |
| "epoch": 0.83, |
| "learning_rate": 0.00014496883348174533, |
| "loss": 0.2442, |
| "step": 311 |
| }, |
| { |
| "epoch": 0.83, |
| "learning_rate": 0.0001447907390917186, |
| "loss": 0.377, |
| "step": 312 |
| }, |
| { |
| "epoch": 0.83, |
| "eval_loss": 0.7219565510749817, |
| "eval_runtime": 3073.748, |
| "eval_samples_per_second": 0.976, |
| "eval_steps_per_second": 0.061, |
| "step": 312 |
| }, |
| { |
| "epoch": 0.83, |
| "learning_rate": 0.00014461264470169192, |
| "loss": 0.9249, |
| "step": 313 |
| }, |
| { |
| "epoch": 0.84, |
| "learning_rate": 0.0001444345503116652, |
| "loss": 0.7808, |
| "step": 314 |
| }, |
| { |
| "epoch": 0.84, |
| "learning_rate": 0.00014425645592163846, |
| "loss": 0.6411, |
| "step": 315 |
| }, |
| { |
| "epoch": 0.84, |
| "learning_rate": 0.00014407836153161176, |
| "loss": 0.7279, |
| "step": 316 |
| }, |
| { |
| "epoch": 0.85, |
| "learning_rate": 0.00014390026714158503, |
| "loss": 0.7926, |
| "step": 317 |
| }, |
| { |
| "epoch": 0.85, |
| "learning_rate": 0.00014372217275155833, |
| "loss": 0.7047, |
| "step": 318 |
| }, |
| { |
| "epoch": 0.85, |
| "learning_rate": 0.0001435440783615316, |
| "loss": 1.0378, |
| "step": 319 |
| }, |
| { |
| "epoch": 0.85, |
| "learning_rate": 0.00014336598397150492, |
| "loss": 0.8464, |
| "step": 320 |
| }, |
| { |
| "epoch": 0.86, |
| "learning_rate": 0.0001431878895814782, |
| "loss": 0.7573, |
| "step": 321 |
| }, |
| { |
| "epoch": 0.86, |
| "learning_rate": 0.0001430097951914515, |
| "loss": 0.0, |
| "step": 322 |
| }, |
| { |
| "epoch": 0.86, |
| "learning_rate": 0.00014283170080142476, |
| "loss": 0.768, |
| "step": 323 |
| }, |
| { |
| "epoch": 0.86, |
| "learning_rate": 0.00014265360641139806, |
| "loss": 0.6512, |
| "step": 324 |
| }, |
| { |
| "epoch": 0.87, |
| "learning_rate": 0.00014247551202137133, |
| "loss": 0.7085, |
| "step": 325 |
| }, |
| { |
| "epoch": 0.87, |
| "learning_rate": 0.0001422974176313446, |
| "loss": 0.6705, |
| "step": 326 |
| }, |
| { |
| "epoch": 0.87, |
| "learning_rate": 0.00014211932324131792, |
| "loss": 0.2988, |
| "step": 327 |
| }, |
| { |
| "epoch": 0.87, |
| "learning_rate": 0.0001419412288512912, |
| "loss": 0.8387, |
| "step": 328 |
| }, |
| { |
| "epoch": 0.88, |
| "learning_rate": 0.0001417631344612645, |
| "loss": 0.6611, |
| "step": 329 |
| }, |
| { |
| "epoch": 0.88, |
| "learning_rate": 0.00014158504007123776, |
| "loss": 0.7045, |
| "step": 330 |
| }, |
| { |
| "epoch": 0.88, |
| "learning_rate": 0.00014140694568121105, |
| "loss": 0.6521, |
| "step": 331 |
| }, |
| { |
| "epoch": 0.89, |
| "learning_rate": 0.00014122885129118432, |
| "loss": 0.6822, |
| "step": 332 |
| }, |
| { |
| "epoch": 0.89, |
| "learning_rate": 0.00014105075690115762, |
| "loss": 0.7175, |
| "step": 333 |
| }, |
| { |
| "epoch": 0.89, |
| "learning_rate": 0.00014087266251113092, |
| "loss": 0.6728, |
| "step": 334 |
| }, |
| { |
| "epoch": 0.89, |
| "learning_rate": 0.0001406945681211042, |
| "loss": 0.7334, |
| "step": 335 |
| }, |
| { |
| "epoch": 0.9, |
| "learning_rate": 0.00014051647373107749, |
| "loss": 0.3581, |
| "step": 336 |
| }, |
| { |
| "epoch": 0.9, |
| "learning_rate": 0.00014033837934105076, |
| "loss": 0.3499, |
| "step": 337 |
| }, |
| { |
| "epoch": 0.9, |
| "learning_rate": 0.00014016028495102405, |
| "loss": 0.2999, |
| "step": 338 |
| }, |
| { |
| "epoch": 0.9, |
| "learning_rate": 0.00013998219056099732, |
| "loss": 0.7995, |
| "step": 339 |
| }, |
| { |
| "epoch": 0.91, |
| "learning_rate": 0.00013980409617097062, |
| "loss": 0.8377, |
| "step": 340 |
| }, |
| { |
| "epoch": 0.91, |
| "learning_rate": 0.00013962600178094392, |
| "loss": 0.3259, |
| "step": 341 |
| }, |
| { |
| "epoch": 0.91, |
| "learning_rate": 0.0001394479073909172, |
| "loss": 0.7226, |
| "step": 342 |
| }, |
| { |
| "epoch": 0.91, |
| "learning_rate": 0.00013926981300089048, |
| "loss": 0.8406, |
| "step": 343 |
| }, |
| { |
| "epoch": 0.92, |
| "learning_rate": 0.00013909171861086375, |
| "loss": 0.7804, |
| "step": 344 |
| }, |
| { |
| "epoch": 0.92, |
| "learning_rate": 0.00013891362422083705, |
| "loss": 0.7195, |
| "step": 345 |
| }, |
| { |
| "epoch": 0.92, |
| "learning_rate": 0.00013873552983081032, |
| "loss": 0.7241, |
| "step": 346 |
| }, |
| { |
| "epoch": 0.93, |
| "learning_rate": 0.00013855743544078362, |
| "loss": 0.8026, |
| "step": 347 |
| }, |
| { |
| "epoch": 0.93, |
| "learning_rate": 0.00013837934105075692, |
| "loss": 0.7555, |
| "step": 348 |
| }, |
| { |
| "epoch": 0.93, |
| "learning_rate": 0.0001382012466607302, |
| "loss": 0.0, |
| "step": 349 |
| }, |
| { |
| "epoch": 0.93, |
| "learning_rate": 0.00013802315227070348, |
| "loss": 0.3378, |
| "step": 350 |
| }, |
| { |
| "epoch": 0.94, |
| "learning_rate": 0.00013784505788067675, |
| "loss": 0.6628, |
| "step": 351 |
| }, |
| { |
| "epoch": 0.94, |
| "learning_rate": 0.00013766696349065005, |
| "loss": 0.3602, |
| "step": 352 |
| }, |
| { |
| "epoch": 0.94, |
| "learning_rate": 0.00013748886910062332, |
| "loss": 0.8476, |
| "step": 353 |
| }, |
| { |
| "epoch": 0.94, |
| "learning_rate": 0.00013731077471059662, |
| "loss": 0.693, |
| "step": 354 |
| }, |
| { |
| "epoch": 0.95, |
| "learning_rate": 0.0001371326803205699, |
| "loss": 0.8012, |
| "step": 355 |
| }, |
| { |
| "epoch": 0.95, |
| "learning_rate": 0.0001369545859305432, |
| "loss": 0.3607, |
| "step": 356 |
| }, |
| { |
| "epoch": 0.95, |
| "learning_rate": 0.00013677649154051648, |
| "loss": 0.8572, |
| "step": 357 |
| }, |
| { |
| "epoch": 0.95, |
| "learning_rate": 0.00013659839715048978, |
| "loss": 0.396, |
| "step": 358 |
| }, |
| { |
| "epoch": 0.96, |
| "learning_rate": 0.00013642030276046305, |
| "loss": 0.6467, |
| "step": 359 |
| }, |
| { |
| "epoch": 0.96, |
| "learning_rate": 0.00013624220837043632, |
| "loss": 0.6621, |
| "step": 360 |
| }, |
| { |
| "epoch": 0.96, |
| "learning_rate": 0.00013606411398040961, |
| "loss": 0.7438, |
| "step": 361 |
| }, |
| { |
| "epoch": 0.97, |
| "learning_rate": 0.0001358860195903829, |
| "loss": 0.4458, |
| "step": 362 |
| }, |
| { |
| "epoch": 0.97, |
| "learning_rate": 0.0001357079252003562, |
| "loss": 0.6783, |
| "step": 363 |
| }, |
| { |
| "epoch": 0.97, |
| "learning_rate": 0.00013552983081032948, |
| "loss": 0.8403, |
| "step": 364 |
| }, |
| { |
| "epoch": 0.97, |
| "learning_rate": 0.00013535173642030278, |
| "loss": 0.8368, |
| "step": 365 |
| }, |
| { |
| "epoch": 0.98, |
| "learning_rate": 0.00013517364203027605, |
| "loss": 0.7452, |
| "step": 366 |
| }, |
| { |
| "epoch": 0.98, |
| "learning_rate": 0.00013499554764024934, |
| "loss": 0.6407, |
| "step": 367 |
| }, |
| { |
| "epoch": 0.98, |
| "learning_rate": 0.0001348174532502226, |
| "loss": 0.7468, |
| "step": 368 |
| }, |
| { |
| "epoch": 0.98, |
| "learning_rate": 0.0001346393588601959, |
| "loss": 0.7002, |
| "step": 369 |
| }, |
| { |
| "epoch": 0.99, |
| "learning_rate": 0.0001344612644701692, |
| "loss": 0.6534, |
| "step": 370 |
| }, |
| { |
| "epoch": 0.99, |
| "learning_rate": 0.00013428317008014248, |
| "loss": 0.7777, |
| "step": 371 |
| }, |
| { |
| "epoch": 0.99, |
| "learning_rate": 0.00013410507569011577, |
| "loss": 0.7862, |
| "step": 372 |
| }, |
| { |
| "epoch": 0.99, |
| "learning_rate": 0.00013392698130008904, |
| "loss": 0.7997, |
| "step": 373 |
| }, |
| { |
| "epoch": 1.0, |
| "learning_rate": 0.00013374888691006234, |
| "loss": 0.5862, |
| "step": 374 |
| }, |
| { |
| "epoch": 1.0, |
| "learning_rate": 0.0001335707925200356, |
| "loss": 0.7702, |
| "step": 375 |
| }, |
| { |
| "epoch": 1.0, |
| "learning_rate": 0.0001333926981300089, |
| "loss": 0.5793, |
| "step": 376 |
| }, |
| { |
| "epoch": 1.01, |
| "learning_rate": 0.0001332146037399822, |
| "loss": 0.6414, |
| "step": 377 |
| }, |
| { |
| "epoch": 1.01, |
| "learning_rate": 0.00013303650934995547, |
| "loss": 0.6137, |
| "step": 378 |
| }, |
| { |
| "epoch": 1.01, |
| "learning_rate": 0.00013285841495992877, |
| "loss": 0.7891, |
| "step": 379 |
| }, |
| { |
| "epoch": 1.01, |
| "learning_rate": 0.00013268032056990204, |
| "loss": 0.6185, |
| "step": 380 |
| }, |
| { |
| "epoch": 1.02, |
| "learning_rate": 0.00013250222617987534, |
| "loss": 0.6925, |
| "step": 381 |
| }, |
| { |
| "epoch": 1.02, |
| "learning_rate": 0.0001323241317898486, |
| "loss": 0.3292, |
| "step": 382 |
| }, |
| { |
| "epoch": 1.02, |
| "learning_rate": 0.00013214603739982193, |
| "loss": 0.5622, |
| "step": 383 |
| }, |
| { |
| "epoch": 1.02, |
| "learning_rate": 0.0001319679430097952, |
| "loss": 0.5694, |
| "step": 384 |
| }, |
| { |
| "epoch": 1.03, |
| "learning_rate": 0.00013178984861976847, |
| "loss": 0.6785, |
| "step": 385 |
| }, |
| { |
| "epoch": 1.03, |
| "learning_rate": 0.00013161175422974177, |
| "loss": 0.6613, |
| "step": 386 |
| }, |
| { |
| "epoch": 1.03, |
| "learning_rate": 0.00013143365983971504, |
| "loss": 0.5935, |
| "step": 387 |
| }, |
| { |
| "epoch": 1.03, |
| "learning_rate": 0.00013125556544968834, |
| "loss": 0.6241, |
| "step": 388 |
| }, |
| { |
| "epoch": 1.04, |
| "learning_rate": 0.0001310774710596616, |
| "loss": 0.6922, |
| "step": 389 |
| }, |
| { |
| "epoch": 1.04, |
| "learning_rate": 0.00013089937666963493, |
| "loss": 0.2301, |
| "step": 390 |
| }, |
| { |
| "epoch": 1.04, |
| "learning_rate": 0.0001307212822796082, |
| "loss": 0.7666, |
| "step": 391 |
| }, |
| { |
| "epoch": 1.05, |
| "learning_rate": 0.0001305431878895815, |
| "loss": 0.6726, |
| "step": 392 |
| }, |
| { |
| "epoch": 1.05, |
| "learning_rate": 0.00013036509349955477, |
| "loss": 0.6813, |
| "step": 393 |
| }, |
| { |
| "epoch": 1.05, |
| "learning_rate": 0.00013018699910952806, |
| "loss": 0.5939, |
| "step": 394 |
| }, |
| { |
| "epoch": 1.05, |
| "learning_rate": 0.00013000890471950133, |
| "loss": 0.5903, |
| "step": 395 |
| }, |
| { |
| "epoch": 1.06, |
| "learning_rate": 0.0001298308103294746, |
| "loss": 0.6489, |
| "step": 396 |
| }, |
| { |
| "epoch": 1.06, |
| "learning_rate": 0.00012965271593944793, |
| "loss": 0.6586, |
| "step": 397 |
| }, |
| { |
| "epoch": 1.06, |
| "learning_rate": 0.0001294746215494212, |
| "loss": 0.3495, |
| "step": 398 |
| }, |
| { |
| "epoch": 1.06, |
| "learning_rate": 0.0001292965271593945, |
| "loss": 0.545, |
| "step": 399 |
| }, |
| { |
| "epoch": 1.07, |
| "learning_rate": 0.00012911843276936777, |
| "loss": 0.3648, |
| "step": 400 |
| }, |
| { |
| "epoch": 1.07, |
| "learning_rate": 0.00012894033837934106, |
| "loss": 0.6597, |
| "step": 401 |
| }, |
| { |
| "epoch": 1.07, |
| "learning_rate": 0.00012876224398931433, |
| "loss": 0.8109, |
| "step": 402 |
| }, |
| { |
| "epoch": 1.07, |
| "learning_rate": 0.00012858414959928763, |
| "loss": 0.6962, |
| "step": 403 |
| }, |
| { |
| "epoch": 1.08, |
| "learning_rate": 0.00012840605520926093, |
| "loss": 0.7175, |
| "step": 404 |
| }, |
| { |
| "epoch": 1.08, |
| "learning_rate": 0.0001282279608192342, |
| "loss": 0.0, |
| "step": 405 |
| }, |
| { |
| "epoch": 1.08, |
| "learning_rate": 0.0001280498664292075, |
| "loss": 0.8547, |
| "step": 406 |
| }, |
| { |
| "epoch": 1.09, |
| "learning_rate": 0.00012787177203918076, |
| "loss": 0.5956, |
| "step": 407 |
| }, |
| { |
| "epoch": 1.09, |
| "learning_rate": 0.00012769367764915406, |
| "loss": 0.6394, |
| "step": 408 |
| }, |
| { |
| "epoch": 1.09, |
| "learning_rate": 0.00012751558325912733, |
| "loss": 0.6, |
| "step": 409 |
| }, |
| { |
| "epoch": 1.09, |
| "learning_rate": 0.00012733748886910063, |
| "loss": 0.616, |
| "step": 410 |
| }, |
| { |
| "epoch": 1.1, |
| "learning_rate": 0.00012715939447907392, |
| "loss": 0.6407, |
| "step": 411 |
| }, |
| { |
| "epoch": 1.1, |
| "learning_rate": 0.0001269813000890472, |
| "loss": 0.2916, |
| "step": 412 |
| }, |
| { |
| "epoch": 1.1, |
| "learning_rate": 0.0001268032056990205, |
| "loss": 0.6721, |
| "step": 413 |
| }, |
| { |
| "epoch": 1.1, |
| "learning_rate": 0.00012662511130899376, |
| "loss": 0.8743, |
| "step": 414 |
| }, |
| { |
| "epoch": 1.11, |
| "learning_rate": 0.00012644701691896706, |
| "loss": 0.6431, |
| "step": 415 |
| }, |
| { |
| "epoch": 1.11, |
| "learning_rate": 0.00012626892252894033, |
| "loss": 0.7007, |
| "step": 416 |
| }, |
| { |
| "epoch": 1.11, |
| "learning_rate": 0.00012609082813891363, |
| "loss": 0.2533, |
| "step": 417 |
| }, |
| { |
| "epoch": 1.11, |
| "learning_rate": 0.00012591273374888692, |
| "loss": 0.672, |
| "step": 418 |
| }, |
| { |
| "epoch": 1.12, |
| "learning_rate": 0.00012573463935886022, |
| "loss": 0.3408, |
| "step": 419 |
| }, |
| { |
| "epoch": 1.12, |
| "learning_rate": 0.0001255565449688335, |
| "loss": 0.7538, |
| "step": 420 |
| }, |
| { |
| "epoch": 1.12, |
| "learning_rate": 0.00012537845057880676, |
| "loss": 0.3449, |
| "step": 421 |
| }, |
| { |
| "epoch": 1.13, |
| "learning_rate": 0.00012520035618878006, |
| "loss": 0.7256, |
| "step": 422 |
| }, |
| { |
| "epoch": 1.13, |
| "learning_rate": 0.00012502226179875333, |
| "loss": 0.7291, |
| "step": 423 |
| }, |
| { |
| "epoch": 1.13, |
| "learning_rate": 0.00012484416740872665, |
| "loss": 0.6065, |
| "step": 424 |
| }, |
| { |
| "epoch": 1.13, |
| "learning_rate": 0.00012466607301869992, |
| "loss": 0.635, |
| "step": 425 |
| }, |
| { |
| "epoch": 1.14, |
| "learning_rate": 0.00012448797862867322, |
| "loss": 0.671, |
| "step": 426 |
| }, |
| { |
| "epoch": 1.14, |
| "learning_rate": 0.0001243098842386465, |
| "loss": 0.6314, |
| "step": 427 |
| }, |
| { |
| "epoch": 1.14, |
| "learning_rate": 0.00012413178984861979, |
| "loss": 0.6545, |
| "step": 428 |
| }, |
| { |
| "epoch": 1.14, |
| "learning_rate": 0.00012395369545859306, |
| "loss": 0.705, |
| "step": 429 |
| }, |
| { |
| "epoch": 1.15, |
| "learning_rate": 0.00012377560106856632, |
| "loss": 0.3167, |
| "step": 430 |
| }, |
| { |
| "epoch": 1.15, |
| "learning_rate": 0.00012359750667853965, |
| "loss": 0.3716, |
| "step": 431 |
| }, |
| { |
| "epoch": 1.15, |
| "learning_rate": 0.00012341941228851292, |
| "loss": 0.3234, |
| "step": 432 |
| }, |
| { |
| "epoch": 1.15, |
| "learning_rate": 0.00012324131789848622, |
| "loss": 0.5392, |
| "step": 433 |
| }, |
| { |
| "epoch": 1.16, |
| "learning_rate": 0.00012306322350845949, |
| "loss": 0.5631, |
| "step": 434 |
| }, |
| { |
| "epoch": 1.16, |
| "learning_rate": 0.00012288512911843278, |
| "loss": 0.6213, |
| "step": 435 |
| }, |
| { |
| "epoch": 1.16, |
| "learning_rate": 0.00012270703472840605, |
| "loss": 0.6517, |
| "step": 436 |
| }, |
| { |
| "epoch": 1.17, |
| "learning_rate": 0.00012252894033837935, |
| "loss": 0.7252, |
| "step": 437 |
| }, |
| { |
| "epoch": 1.17, |
| "learning_rate": 0.00012235084594835265, |
| "loss": 0.4931, |
| "step": 438 |
| }, |
| { |
| "epoch": 1.17, |
| "learning_rate": 0.00012217275155832592, |
| "loss": 0.7323, |
| "step": 439 |
| }, |
| { |
| "epoch": 1.17, |
| "learning_rate": 0.00012199465716829921, |
| "loss": 0.3804, |
| "step": 440 |
| }, |
| { |
| "epoch": 1.18, |
| "learning_rate": 0.0001218165627782725, |
| "loss": 0.6936, |
| "step": 441 |
| }, |
| { |
| "epoch": 1.18, |
| "learning_rate": 0.00012163846838824577, |
| "loss": 0.3008, |
| "step": 442 |
| }, |
| { |
| "epoch": 1.18, |
| "learning_rate": 0.00012146037399821905, |
| "loss": 0.3464, |
| "step": 443 |
| }, |
| { |
| "epoch": 1.18, |
| "learning_rate": 0.00012128227960819233, |
| "loss": 0.5404, |
| "step": 444 |
| }, |
| { |
| "epoch": 1.19, |
| "learning_rate": 0.00012110418521816565, |
| "loss": 0.6135, |
| "step": 445 |
| }, |
| { |
| "epoch": 1.19, |
| "learning_rate": 0.00012092609082813893, |
| "loss": 0.7313, |
| "step": 446 |
| }, |
| { |
| "epoch": 1.19, |
| "learning_rate": 0.00012074799643811221, |
| "loss": 0.3075, |
| "step": 447 |
| }, |
| { |
| "epoch": 1.19, |
| "learning_rate": 0.0001205699020480855, |
| "loss": 0.2808, |
| "step": 448 |
| }, |
| { |
| "epoch": 1.2, |
| "learning_rate": 0.00012039180765805878, |
| "loss": 0.2695, |
| "step": 449 |
| }, |
| { |
| "epoch": 1.2, |
| "learning_rate": 0.00012021371326803206, |
| "loss": 0.358, |
| "step": 450 |
| }, |
| { |
| "epoch": 1.2, |
| "learning_rate": 0.00012003561887800535, |
| "loss": 0.5827, |
| "step": 451 |
| }, |
| { |
| "epoch": 1.21, |
| "learning_rate": 0.00011985752448797864, |
| "loss": 0.8548, |
| "step": 452 |
| }, |
| { |
| "epoch": 1.21, |
| "learning_rate": 0.00011967943009795193, |
| "loss": 0.53, |
| "step": 453 |
| }, |
| { |
| "epoch": 1.21, |
| "learning_rate": 0.00011950133570792521, |
| "loss": 0.3389, |
| "step": 454 |
| }, |
| { |
| "epoch": 1.21, |
| "learning_rate": 0.0001193232413178985, |
| "loss": 0.7045, |
| "step": 455 |
| }, |
| { |
| "epoch": 1.22, |
| "learning_rate": 0.00011914514692787178, |
| "loss": 0.6601, |
| "step": 456 |
| }, |
| { |
| "epoch": 1.22, |
| "learning_rate": 0.00011896705253784506, |
| "loss": 0.6617, |
| "step": 457 |
| }, |
| { |
| "epoch": 1.22, |
| "learning_rate": 0.00011878895814781834, |
| "loss": 0.3759, |
| "step": 458 |
| }, |
| { |
| "epoch": 1.22, |
| "learning_rate": 0.00011861086375779164, |
| "loss": 0.6432, |
| "step": 459 |
| }, |
| { |
| "epoch": 1.23, |
| "learning_rate": 0.00011843276936776492, |
| "loss": 0.5865, |
| "step": 460 |
| }, |
| { |
| "epoch": 1.23, |
| "learning_rate": 0.00011825467497773821, |
| "loss": 0.6918, |
| "step": 461 |
| }, |
| { |
| "epoch": 1.23, |
| "learning_rate": 0.00011807658058771149, |
| "loss": 0.3008, |
| "step": 462 |
| }, |
| { |
| "epoch": 1.23, |
| "learning_rate": 0.00011789848619768478, |
| "loss": 0.565, |
| "step": 463 |
| }, |
| { |
| "epoch": 1.24, |
| "learning_rate": 0.00011772039180765806, |
| "loss": 0.3316, |
| "step": 464 |
| }, |
| { |
| "epoch": 1.24, |
| "learning_rate": 0.00011754229741763134, |
| "loss": 0.6376, |
| "step": 465 |
| }, |
| { |
| "epoch": 1.24, |
| "learning_rate": 0.00011736420302760465, |
| "loss": 0.6327, |
| "step": 466 |
| }, |
| { |
| "epoch": 1.25, |
| "learning_rate": 0.00011718610863757792, |
| "loss": 0.2644, |
| "step": 467 |
| }, |
| { |
| "epoch": 1.25, |
| "learning_rate": 0.0001170080142475512, |
| "loss": 0.7361, |
| "step": 468 |
| }, |
| { |
| "epoch": 1.25, |
| "eval_loss": 0.7121204137802124, |
| "eval_runtime": 3029.8509, |
| "eval_samples_per_second": 0.99, |
| "eval_steps_per_second": 0.062, |
| "step": 468 |
| }, |
| { |
| "epoch": 1.25, |
| "learning_rate": 0.00011682991985752449, |
| "loss": 0.5681, |
| "step": 469 |
| }, |
| { |
| "epoch": 1.25, |
| "learning_rate": 0.00011665182546749777, |
| "loss": 0.5816, |
| "step": 470 |
| }, |
| { |
| "epoch": 1.26, |
| "learning_rate": 0.00011647373107747106, |
| "loss": 0.6412, |
| "step": 471 |
| }, |
| { |
| "epoch": 1.26, |
| "learning_rate": 0.00011629563668744434, |
| "loss": 0.6298, |
| "step": 472 |
| }, |
| { |
| "epoch": 1.26, |
| "learning_rate": 0.00011611754229741765, |
| "loss": 0.6575, |
| "step": 473 |
| }, |
| { |
| "epoch": 1.26, |
| "learning_rate": 0.00011593944790739093, |
| "loss": 0.652, |
| "step": 474 |
| }, |
| { |
| "epoch": 1.27, |
| "learning_rate": 0.00011576135351736422, |
| "loss": 0.6355, |
| "step": 475 |
| }, |
| { |
| "epoch": 1.27, |
| "learning_rate": 0.0001155832591273375, |
| "loss": 0.7028, |
| "step": 476 |
| }, |
| { |
| "epoch": 1.27, |
| "learning_rate": 0.00011540516473731077, |
| "loss": 0.6536, |
| "step": 477 |
| }, |
| { |
| "epoch": 1.27, |
| "learning_rate": 0.00011522707034728406, |
| "loss": 0.7111, |
| "step": 478 |
| }, |
| { |
| "epoch": 1.28, |
| "learning_rate": 0.00011504897595725734, |
| "loss": 0.3475, |
| "step": 479 |
| }, |
| { |
| "epoch": 1.28, |
| "learning_rate": 0.00011487088156723065, |
| "loss": 0.7486, |
| "step": 480 |
| }, |
| { |
| "epoch": 1.28, |
| "learning_rate": 0.00011469278717720393, |
| "loss": 0.6859, |
| "step": 481 |
| }, |
| { |
| "epoch": 1.29, |
| "learning_rate": 0.00011451469278717722, |
| "loss": 0.6173, |
| "step": 482 |
| }, |
| { |
| "epoch": 1.29, |
| "learning_rate": 0.0001143365983971505, |
| "loss": 0.616, |
| "step": 483 |
| }, |
| { |
| "epoch": 1.29, |
| "learning_rate": 0.00011415850400712378, |
| "loss": 0.682, |
| "step": 484 |
| }, |
| { |
| "epoch": 1.29, |
| "learning_rate": 0.00011398040961709707, |
| "loss": 0.5704, |
| "step": 485 |
| }, |
| { |
| "epoch": 1.3, |
| "learning_rate": 0.00011380231522707035, |
| "loss": 0.7536, |
| "step": 486 |
| }, |
| { |
| "epoch": 1.3, |
| "learning_rate": 0.00011362422083704365, |
| "loss": 0.7496, |
| "step": 487 |
| }, |
| { |
| "epoch": 1.3, |
| "learning_rate": 0.00011344612644701693, |
| "loss": 0.6929, |
| "step": 488 |
| }, |
| { |
| "epoch": 1.3, |
| "learning_rate": 0.00011326803205699021, |
| "loss": 0.619, |
| "step": 489 |
| }, |
| { |
| "epoch": 1.31, |
| "learning_rate": 0.0001130899376669635, |
| "loss": 0.6834, |
| "step": 490 |
| }, |
| { |
| "epoch": 1.31, |
| "learning_rate": 0.00011291184327693678, |
| "loss": 0.6826, |
| "step": 491 |
| }, |
| { |
| "epoch": 1.31, |
| "learning_rate": 0.00011273374888691006, |
| "loss": 0.6989, |
| "step": 492 |
| }, |
| { |
| "epoch": 1.31, |
| "learning_rate": 0.00011255565449688335, |
| "loss": 0.3072, |
| "step": 493 |
| }, |
| { |
| "epoch": 1.32, |
| "learning_rate": 0.00011237756010685665, |
| "loss": 0.6036, |
| "step": 494 |
| }, |
| { |
| "epoch": 1.32, |
| "learning_rate": 0.00011219946571682993, |
| "loss": 0.3625, |
| "step": 495 |
| }, |
| { |
| "epoch": 1.32, |
| "learning_rate": 0.00011202137132680321, |
| "loss": 0.6862, |
| "step": 496 |
| }, |
| { |
| "epoch": 1.33, |
| "learning_rate": 0.0001118432769367765, |
| "loss": 0.7319, |
| "step": 497 |
| }, |
| { |
| "epoch": 1.33, |
| "learning_rate": 0.00011166518254674978, |
| "loss": 0.6455, |
| "step": 498 |
| }, |
| { |
| "epoch": 1.33, |
| "learning_rate": 0.00011148708815672306, |
| "loss": 0.6173, |
| "step": 499 |
| }, |
| { |
| "epoch": 1.33, |
| "learning_rate": 0.00011130899376669635, |
| "loss": 0.575, |
| "step": 500 |
| }, |
| { |
| "epoch": 1.34, |
| "learning_rate": 0.00011113089937666966, |
| "loss": 0.6085, |
| "step": 501 |
| }, |
| { |
| "epoch": 1.34, |
| "learning_rate": 0.00011095280498664293, |
| "loss": 0.5568, |
| "step": 502 |
| }, |
| { |
| "epoch": 1.34, |
| "learning_rate": 0.00011077471059661621, |
| "loss": 0.6058, |
| "step": 503 |
| }, |
| { |
| "epoch": 1.34, |
| "learning_rate": 0.0001105966162065895, |
| "loss": 0.7362, |
| "step": 504 |
| }, |
| { |
| "epoch": 1.35, |
| "learning_rate": 0.00011041852181656278, |
| "loss": 0.6131, |
| "step": 505 |
| }, |
| { |
| "epoch": 1.35, |
| "learning_rate": 0.00011024042742653606, |
| "loss": 0.3333, |
| "step": 506 |
| }, |
| { |
| "epoch": 1.35, |
| "learning_rate": 0.00011006233303650934, |
| "loss": 0.5854, |
| "step": 507 |
| }, |
| { |
| "epoch": 1.35, |
| "learning_rate": 0.00010988423864648266, |
| "loss": 0.6357, |
| "step": 508 |
| }, |
| { |
| "epoch": 1.36, |
| "learning_rate": 0.00010970614425645594, |
| "loss": 0.4692, |
| "step": 509 |
| }, |
| { |
| "epoch": 1.36, |
| "learning_rate": 0.00010952804986642922, |
| "loss": 0.608, |
| "step": 510 |
| }, |
| { |
| "epoch": 1.36, |
| "learning_rate": 0.0001093499554764025, |
| "loss": 0.7262, |
| "step": 511 |
| }, |
| { |
| "epoch": 1.37, |
| "learning_rate": 0.00010917186108637578, |
| "loss": 0.3735, |
| "step": 512 |
| }, |
| { |
| "epoch": 1.37, |
| "learning_rate": 0.00010899376669634906, |
| "loss": 0.8646, |
| "step": 513 |
| }, |
| { |
| "epoch": 1.37, |
| "learning_rate": 0.00010881567230632234, |
| "loss": 0.6547, |
| "step": 514 |
| }, |
| { |
| "epoch": 1.37, |
| "learning_rate": 0.00010863757791629565, |
| "loss": 0.6472, |
| "step": 515 |
| }, |
| { |
| "epoch": 1.38, |
| "learning_rate": 0.00010845948352626894, |
| "loss": 0.2993, |
| "step": 516 |
| }, |
| { |
| "epoch": 1.38, |
| "learning_rate": 0.00010828138913624222, |
| "loss": 0.6381, |
| "step": 517 |
| }, |
| { |
| "epoch": 1.38, |
| "learning_rate": 0.0001081032947462155, |
| "loss": 0.7432, |
| "step": 518 |
| }, |
| { |
| "epoch": 1.38, |
| "learning_rate": 0.00010792520035618879, |
| "loss": 0.2643, |
| "step": 519 |
| }, |
| { |
| "epoch": 1.39, |
| "learning_rate": 0.00010774710596616207, |
| "loss": 0.6846, |
| "step": 520 |
| }, |
| { |
| "epoch": 1.39, |
| "learning_rate": 0.00010756901157613535, |
| "loss": 0.561, |
| "step": 521 |
| }, |
| { |
| "epoch": 1.39, |
| "learning_rate": 0.00010739091718610865, |
| "loss": 0.6041, |
| "step": 522 |
| }, |
| { |
| "epoch": 1.39, |
| "learning_rate": 0.00010721282279608193, |
| "loss": 0.3275, |
| "step": 523 |
| }, |
| { |
| "epoch": 1.4, |
| "learning_rate": 0.00010703472840605522, |
| "loss": 0.322, |
| "step": 524 |
| }, |
| { |
| "epoch": 1.4, |
| "learning_rate": 0.0001068566340160285, |
| "loss": 0.7932, |
| "step": 525 |
| }, |
| { |
| "epoch": 1.4, |
| "learning_rate": 0.00010667853962600179, |
| "loss": 0.3419, |
| "step": 526 |
| }, |
| { |
| "epoch": 1.41, |
| "learning_rate": 0.00010650044523597507, |
| "loss": 0.2482, |
| "step": 527 |
| }, |
| { |
| "epoch": 1.41, |
| "learning_rate": 0.00010632235084594835, |
| "loss": 0.619, |
| "step": 528 |
| }, |
| { |
| "epoch": 1.41, |
| "learning_rate": 0.00010614425645592165, |
| "loss": 0.5736, |
| "step": 529 |
| }, |
| { |
| "epoch": 1.41, |
| "learning_rate": 0.00010596616206589493, |
| "loss": 0.6258, |
| "step": 530 |
| }, |
| { |
| "epoch": 1.42, |
| "learning_rate": 0.00010578806767586822, |
| "loss": 0.2708, |
| "step": 531 |
| }, |
| { |
| "epoch": 1.42, |
| "learning_rate": 0.0001056099732858415, |
| "loss": 0.7375, |
| "step": 532 |
| }, |
| { |
| "epoch": 1.42, |
| "learning_rate": 0.00010543187889581478, |
| "loss": 0.7543, |
| "step": 533 |
| }, |
| { |
| "epoch": 1.42, |
| "learning_rate": 0.00010525378450578807, |
| "loss": 0.6985, |
| "step": 534 |
| }, |
| { |
| "epoch": 1.43, |
| "learning_rate": 0.00010507569011576135, |
| "loss": 0.6806, |
| "step": 535 |
| }, |
| { |
| "epoch": 1.43, |
| "learning_rate": 0.00010489759572573465, |
| "loss": 0.5141, |
| "step": 536 |
| }, |
| { |
| "epoch": 1.43, |
| "learning_rate": 0.00010471950133570793, |
| "loss": 0.6769, |
| "step": 537 |
| }, |
| { |
| "epoch": 1.43, |
| "learning_rate": 0.00010454140694568121, |
| "loss": 0.334, |
| "step": 538 |
| }, |
| { |
| "epoch": 1.44, |
| "learning_rate": 0.0001043633125556545, |
| "loss": 0.6568, |
| "step": 539 |
| }, |
| { |
| "epoch": 1.44, |
| "learning_rate": 0.00010418521816562778, |
| "loss": 0.613, |
| "step": 540 |
| }, |
| { |
| "epoch": 1.44, |
| "learning_rate": 0.00010400712377560106, |
| "loss": 0.6049, |
| "step": 541 |
| }, |
| { |
| "epoch": 1.45, |
| "learning_rate": 0.00010382902938557435, |
| "loss": 0.3279, |
| "step": 542 |
| }, |
| { |
| "epoch": 1.45, |
| "learning_rate": 0.00010365093499554766, |
| "loss": 0.6718, |
| "step": 543 |
| }, |
| { |
| "epoch": 1.45, |
| "learning_rate": 0.00010347284060552094, |
| "loss": 0.6749, |
| "step": 544 |
| }, |
| { |
| "epoch": 1.45, |
| "learning_rate": 0.00010329474621549423, |
| "loss": 0.6168, |
| "step": 545 |
| }, |
| { |
| "epoch": 1.46, |
| "learning_rate": 0.00010311665182546751, |
| "loss": 0.5998, |
| "step": 546 |
| }, |
| { |
| "epoch": 1.46, |
| "learning_rate": 0.00010293855743544078, |
| "loss": 0.5491, |
| "step": 547 |
| }, |
| { |
| "epoch": 1.46, |
| "learning_rate": 0.00010276046304541406, |
| "loss": 0.5222, |
| "step": 548 |
| }, |
| { |
| "epoch": 1.46, |
| "learning_rate": 0.00010258236865538735, |
| "loss": 0.5417, |
| "step": 549 |
| }, |
| { |
| "epoch": 1.47, |
| "learning_rate": 0.00010240427426536066, |
| "loss": 0.7236, |
| "step": 550 |
| }, |
| { |
| "epoch": 1.47, |
| "learning_rate": 0.00010222617987533394, |
| "loss": 0.7101, |
| "step": 551 |
| }, |
| { |
| "epoch": 1.47, |
| "learning_rate": 0.00010204808548530722, |
| "loss": 0.6727, |
| "step": 552 |
| }, |
| { |
| "epoch": 1.47, |
| "learning_rate": 0.00010186999109528051, |
| "loss": 0.7365, |
| "step": 553 |
| }, |
| { |
| "epoch": 1.48, |
| "learning_rate": 0.00010169189670525379, |
| "loss": 0.2442, |
| "step": 554 |
| }, |
| { |
| "epoch": 1.48, |
| "learning_rate": 0.00010151380231522707, |
| "loss": 0.7189, |
| "step": 555 |
| }, |
| { |
| "epoch": 1.48, |
| "learning_rate": 0.00010133570792520036, |
| "loss": 0.3235, |
| "step": 556 |
| }, |
| { |
| "epoch": 1.49, |
| "learning_rate": 0.00010115761353517366, |
| "loss": 0.6995, |
| "step": 557 |
| }, |
| { |
| "epoch": 1.49, |
| "learning_rate": 0.00010097951914514694, |
| "loss": 0.2615, |
| "step": 558 |
| }, |
| { |
| "epoch": 1.49, |
| "learning_rate": 0.00010080142475512022, |
| "loss": 0.5468, |
| "step": 559 |
| }, |
| { |
| "epoch": 1.49, |
| "learning_rate": 0.0001006233303650935, |
| "loss": 0.5447, |
| "step": 560 |
| }, |
| { |
| "epoch": 1.5, |
| "learning_rate": 0.00010044523597506679, |
| "loss": 0.6512, |
| "step": 561 |
| }, |
| { |
| "epoch": 1.5, |
| "learning_rate": 0.00010026714158504007, |
| "loss": 0.5687, |
| "step": 562 |
| }, |
| { |
| "epoch": 1.5, |
| "learning_rate": 0.00010008904719501336, |
| "loss": 0.5992, |
| "step": 563 |
| }, |
| { |
| "epoch": 1.5, |
| "learning_rate": 9.991095280498664e-05, |
| "loss": 0.7338, |
| "step": 564 |
| }, |
| { |
| "epoch": 1.51, |
| "learning_rate": 9.973285841495992e-05, |
| "loss": 0.6911, |
| "step": 565 |
| }, |
| { |
| "epoch": 1.51, |
| "learning_rate": 9.955476402493322e-05, |
| "loss": 0.4459, |
| "step": 566 |
| }, |
| { |
| "epoch": 1.51, |
| "learning_rate": 9.93766696349065e-05, |
| "loss": 0.7121, |
| "step": 567 |
| }, |
| { |
| "epoch": 1.51, |
| "learning_rate": 9.919857524487979e-05, |
| "loss": 0.5589, |
| "step": 568 |
| }, |
| { |
| "epoch": 1.52, |
| "learning_rate": 9.902048085485308e-05, |
| "loss": 0.677, |
| "step": 569 |
| }, |
| { |
| "epoch": 1.52, |
| "learning_rate": 9.884238646482637e-05, |
| "loss": 0.7049, |
| "step": 570 |
| }, |
| { |
| "epoch": 1.52, |
| "learning_rate": 9.866429207479965e-05, |
| "loss": 0.6427, |
| "step": 571 |
| }, |
| { |
| "epoch": 1.53, |
| "learning_rate": 9.848619768477293e-05, |
| "loss": 0.5861, |
| "step": 572 |
| }, |
| { |
| "epoch": 1.53, |
| "learning_rate": 9.830810329474622e-05, |
| "loss": 0.5978, |
| "step": 573 |
| }, |
| { |
| "epoch": 1.53, |
| "learning_rate": 9.81300089047195e-05, |
| "loss": 0.6745, |
| "step": 574 |
| }, |
| { |
| "epoch": 1.53, |
| "learning_rate": 9.795191451469279e-05, |
| "loss": 0.6032, |
| "step": 575 |
| }, |
| { |
| "epoch": 1.54, |
| "learning_rate": 9.777382012466608e-05, |
| "loss": 0.5781, |
| "step": 576 |
| }, |
| { |
| "epoch": 1.54, |
| "learning_rate": 9.759572573463937e-05, |
| "loss": 0.7666, |
| "step": 577 |
| }, |
| { |
| "epoch": 1.54, |
| "learning_rate": 9.741763134461265e-05, |
| "loss": 0.4642, |
| "step": 578 |
| }, |
| { |
| "epoch": 1.54, |
| "learning_rate": 9.723953695458593e-05, |
| "loss": 0.2939, |
| "step": 579 |
| }, |
| { |
| "epoch": 1.55, |
| "learning_rate": 9.706144256455923e-05, |
| "loss": 0.351, |
| "step": 580 |
| }, |
| { |
| "epoch": 1.55, |
| "learning_rate": 9.68833481745325e-05, |
| "loss": 0.2853, |
| "step": 581 |
| }, |
| { |
| "epoch": 1.55, |
| "learning_rate": 9.670525378450578e-05, |
| "loss": 0.6338, |
| "step": 582 |
| }, |
| { |
| "epoch": 1.55, |
| "learning_rate": 9.652715939447908e-05, |
| "loss": 0.797, |
| "step": 583 |
| }, |
| { |
| "epoch": 1.56, |
| "learning_rate": 9.634906500445236e-05, |
| "loss": 0.2846, |
| "step": 584 |
| }, |
| { |
| "epoch": 1.56, |
| "learning_rate": 9.617097061442565e-05, |
| "loss": 0.7644, |
| "step": 585 |
| }, |
| { |
| "epoch": 1.56, |
| "learning_rate": 9.599287622439893e-05, |
| "loss": 0.3915, |
| "step": 586 |
| }, |
| { |
| "epoch": 1.57, |
| "learning_rate": 9.581478183437223e-05, |
| "loss": 0.6562, |
| "step": 587 |
| }, |
| { |
| "epoch": 1.57, |
| "learning_rate": 9.563668744434551e-05, |
| "loss": 0.7001, |
| "step": 588 |
| }, |
| { |
| "epoch": 1.57, |
| "learning_rate": 9.54585930543188e-05, |
| "loss": 0.6126, |
| "step": 589 |
| }, |
| { |
| "epoch": 1.57, |
| "learning_rate": 9.528049866429208e-05, |
| "loss": 0.7411, |
| "step": 590 |
| }, |
| { |
| "epoch": 1.58, |
| "learning_rate": 9.510240427426536e-05, |
| "loss": 0.7062, |
| "step": 591 |
| }, |
| { |
| "epoch": 1.58, |
| "learning_rate": 9.492430988423865e-05, |
| "loss": 0.3634, |
| "step": 592 |
| }, |
| { |
| "epoch": 1.58, |
| "learning_rate": 9.474621549421193e-05, |
| "loss": 0.6527, |
| "step": 593 |
| }, |
| { |
| "epoch": 1.58, |
| "learning_rate": 9.456812110418523e-05, |
| "loss": 0.6956, |
| "step": 594 |
| }, |
| { |
| "epoch": 1.59, |
| "learning_rate": 9.439002671415851e-05, |
| "loss": 0.5481, |
| "step": 595 |
| }, |
| { |
| "epoch": 1.59, |
| "learning_rate": 9.421193232413179e-05, |
| "loss": 0.3675, |
| "step": 596 |
| }, |
| { |
| "epoch": 1.59, |
| "learning_rate": 9.403383793410509e-05, |
| "loss": 0.6013, |
| "step": 597 |
| }, |
| { |
| "epoch": 1.59, |
| "learning_rate": 9.385574354407837e-05, |
| "loss": 0.5972, |
| "step": 598 |
| }, |
| { |
| "epoch": 1.6, |
| "learning_rate": 9.367764915405164e-05, |
| "loss": 0.6441, |
| "step": 599 |
| }, |
| { |
| "epoch": 1.6, |
| "learning_rate": 9.349955476402493e-05, |
| "loss": 0.5255, |
| "step": 600 |
| }, |
| { |
| "epoch": 1.6, |
| "learning_rate": 9.332146037399822e-05, |
| "loss": 0.7279, |
| "step": 601 |
| }, |
| { |
| "epoch": 1.61, |
| "learning_rate": 9.314336598397151e-05, |
| "loss": 0.603, |
| "step": 602 |
| }, |
| { |
| "epoch": 1.61, |
| "learning_rate": 9.296527159394479e-05, |
| "loss": 0.7985, |
| "step": 603 |
| }, |
| { |
| "epoch": 1.61, |
| "learning_rate": 9.278717720391809e-05, |
| "loss": 0.645, |
| "step": 604 |
| }, |
| { |
| "epoch": 1.61, |
| "learning_rate": 9.260908281389137e-05, |
| "loss": 0.4851, |
| "step": 605 |
| }, |
| { |
| "epoch": 1.62, |
| "learning_rate": 9.243098842386466e-05, |
| "loss": 0.6799, |
| "step": 606 |
| }, |
| { |
| "epoch": 1.62, |
| "learning_rate": 9.225289403383794e-05, |
| "loss": 0.618, |
| "step": 607 |
| }, |
| { |
| "epoch": 1.62, |
| "learning_rate": 9.207479964381122e-05, |
| "loss": 0.7106, |
| "step": 608 |
| }, |
| { |
| "epoch": 1.62, |
| "learning_rate": 9.18967052537845e-05, |
| "loss": 0.6385, |
| "step": 609 |
| }, |
| { |
| "epoch": 1.63, |
| "learning_rate": 9.171861086375779e-05, |
| "loss": 0.5345, |
| "step": 610 |
| }, |
| { |
| "epoch": 1.63, |
| "learning_rate": 9.154051647373109e-05, |
| "loss": 0.6337, |
| "step": 611 |
| }, |
| { |
| "epoch": 1.63, |
| "learning_rate": 9.136242208370437e-05, |
| "loss": 0.2828, |
| "step": 612 |
| }, |
| { |
| "epoch": 1.63, |
| "learning_rate": 9.118432769367765e-05, |
| "loss": 0.5525, |
| "step": 613 |
| }, |
| { |
| "epoch": 1.64, |
| "learning_rate": 9.100623330365094e-05, |
| "loss": 0.5905, |
| "step": 614 |
| }, |
| { |
| "epoch": 1.64, |
| "learning_rate": 9.082813891362423e-05, |
| "loss": 0.6172, |
| "step": 615 |
| }, |
| { |
| "epoch": 1.64, |
| "learning_rate": 9.06500445235975e-05, |
| "loss": 0.2531, |
| "step": 616 |
| }, |
| { |
| "epoch": 1.65, |
| "learning_rate": 9.047195013357079e-05, |
| "loss": 0.5794, |
| "step": 617 |
| }, |
| { |
| "epoch": 1.65, |
| "learning_rate": 9.029385574354408e-05, |
| "loss": 0.5981, |
| "step": 618 |
| }, |
| { |
| "epoch": 1.65, |
| "learning_rate": 9.011576135351737e-05, |
| "loss": 0.6818, |
| "step": 619 |
| }, |
| { |
| "epoch": 1.65, |
| "learning_rate": 8.993766696349065e-05, |
| "loss": 0.4925, |
| "step": 620 |
| }, |
| { |
| "epoch": 1.66, |
| "learning_rate": 8.975957257346394e-05, |
| "loss": 0.6088, |
| "step": 621 |
| }, |
| { |
| "epoch": 1.66, |
| "learning_rate": 8.958147818343723e-05, |
| "loss": 0.3952, |
| "step": 622 |
| }, |
| { |
| "epoch": 1.66, |
| "learning_rate": 8.940338379341052e-05, |
| "loss": 0.7616, |
| "step": 623 |
| }, |
| { |
| "epoch": 1.66, |
| "learning_rate": 8.92252894033838e-05, |
| "loss": 0.2656, |
| "step": 624 |
| }, |
| { |
| "epoch": 1.66, |
| "eval_loss": 0.703172504901886, |
| "eval_runtime": 3013.5535, |
| "eval_samples_per_second": 0.996, |
| "eval_steps_per_second": 0.062, |
| "step": 624 |
| }, |
| { |
| "epoch": 1.67, |
| "learning_rate": 8.904719501335708e-05, |
| "loss": 0.3456, |
| "step": 625 |
| }, |
| { |
| "epoch": 1.67, |
| "learning_rate": 8.886910062333037e-05, |
| "loss": 0.676, |
| "step": 626 |
| }, |
| { |
| "epoch": 1.67, |
| "learning_rate": 8.869100623330365e-05, |
| "loss": 0.5272, |
| "step": 627 |
| }, |
| { |
| "epoch": 1.67, |
| "learning_rate": 8.851291184327693e-05, |
| "loss": 0.6658, |
| "step": 628 |
| }, |
| { |
| "epoch": 1.68, |
| "learning_rate": 8.833481745325023e-05, |
| "loss": 0.6169, |
| "step": 629 |
| }, |
| { |
| "epoch": 1.68, |
| "learning_rate": 8.815672306322351e-05, |
| "loss": 0.6002, |
| "step": 630 |
| }, |
| { |
| "epoch": 1.68, |
| "learning_rate": 8.79786286731968e-05, |
| "loss": 0.7356, |
| "step": 631 |
| }, |
| { |
| "epoch": 1.69, |
| "learning_rate": 8.78005342831701e-05, |
| "loss": 0.6897, |
| "step": 632 |
| }, |
| { |
| "epoch": 1.69, |
| "learning_rate": 8.762243989314338e-05, |
| "loss": 0.2658, |
| "step": 633 |
| }, |
| { |
| "epoch": 1.69, |
| "learning_rate": 8.744434550311665e-05, |
| "loss": 0.6747, |
| "step": 634 |
| }, |
| { |
| "epoch": 1.69, |
| "learning_rate": 8.726625111308994e-05, |
| "loss": 0.6006, |
| "step": 635 |
| }, |
| { |
| "epoch": 1.7, |
| "learning_rate": 8.708815672306323e-05, |
| "loss": 0.6688, |
| "step": 636 |
| }, |
| { |
| "epoch": 1.7, |
| "learning_rate": 8.691006233303651e-05, |
| "loss": 0.0, |
| "step": 637 |
| }, |
| { |
| "epoch": 1.7, |
| "learning_rate": 8.67319679430098e-05, |
| "loss": 0.6671, |
| "step": 638 |
| }, |
| { |
| "epoch": 1.7, |
| "learning_rate": 8.655387355298309e-05, |
| "loss": 0.5724, |
| "step": 639 |
| }, |
| { |
| "epoch": 1.71, |
| "learning_rate": 8.637577916295638e-05, |
| "loss": 0.6995, |
| "step": 640 |
| }, |
| { |
| "epoch": 1.71, |
| "learning_rate": 8.619768477292966e-05, |
| "loss": 0.3299, |
| "step": 641 |
| }, |
| { |
| "epoch": 1.71, |
| "learning_rate": 8.601959038290294e-05, |
| "loss": 0.6205, |
| "step": 642 |
| }, |
| { |
| "epoch": 1.71, |
| "learning_rate": 8.584149599287623e-05, |
| "loss": 0.6103, |
| "step": 643 |
| }, |
| { |
| "epoch": 1.72, |
| "learning_rate": 8.566340160284951e-05, |
| "loss": 0.5643, |
| "step": 644 |
| }, |
| { |
| "epoch": 1.72, |
| "learning_rate": 8.54853072128228e-05, |
| "loss": 0.7474, |
| "step": 645 |
| }, |
| { |
| "epoch": 1.72, |
| "learning_rate": 8.530721282279609e-05, |
| "loss": 0.0, |
| "step": 646 |
| }, |
| { |
| "epoch": 1.73, |
| "learning_rate": 8.512911843276937e-05, |
| "loss": 0.7264, |
| "step": 647 |
| }, |
| { |
| "epoch": 1.73, |
| "learning_rate": 8.495102404274266e-05, |
| "loss": 0.8836, |
| "step": 648 |
| }, |
| { |
| "epoch": 1.73, |
| "learning_rate": 8.477292965271595e-05, |
| "loss": 0.7588, |
| "step": 649 |
| }, |
| { |
| "epoch": 1.73, |
| "learning_rate": 8.459483526268924e-05, |
| "loss": 0.6704, |
| "step": 650 |
| }, |
| { |
| "epoch": 1.74, |
| "learning_rate": 8.441674087266251e-05, |
| "loss": 0.5627, |
| "step": 651 |
| }, |
| { |
| "epoch": 1.74, |
| "learning_rate": 8.423864648263579e-05, |
| "loss": 0.5401, |
| "step": 652 |
| }, |
| { |
| "epoch": 1.74, |
| "learning_rate": 8.406055209260909e-05, |
| "loss": 0.8335, |
| "step": 653 |
| }, |
| { |
| "epoch": 1.74, |
| "learning_rate": 8.388245770258237e-05, |
| "loss": 0.6584, |
| "step": 654 |
| }, |
| { |
| "epoch": 1.75, |
| "learning_rate": 8.370436331255566e-05, |
| "loss": 0.5991, |
| "step": 655 |
| }, |
| { |
| "epoch": 1.75, |
| "learning_rate": 8.352626892252895e-05, |
| "loss": 0.6626, |
| "step": 656 |
| }, |
| { |
| "epoch": 1.75, |
| "learning_rate": 8.334817453250224e-05, |
| "loss": 0.5727, |
| "step": 657 |
| }, |
| { |
| "epoch": 1.75, |
| "learning_rate": 8.317008014247552e-05, |
| "loss": 0.5497, |
| "step": 658 |
| }, |
| { |
| "epoch": 1.76, |
| "learning_rate": 8.29919857524488e-05, |
| "loss": 0.7755, |
| "step": 659 |
| }, |
| { |
| "epoch": 1.76, |
| "learning_rate": 8.281389136242209e-05, |
| "loss": 0.6321, |
| "step": 660 |
| }, |
| { |
| "epoch": 1.76, |
| "learning_rate": 8.263579697239537e-05, |
| "loss": 0.6396, |
| "step": 661 |
| }, |
| { |
| "epoch": 1.77, |
| "learning_rate": 8.245770258236865e-05, |
| "loss": 0.6499, |
| "step": 662 |
| }, |
| { |
| "epoch": 1.77, |
| "learning_rate": 8.227960819234195e-05, |
| "loss": 0.7268, |
| "step": 663 |
| }, |
| { |
| "epoch": 1.77, |
| "learning_rate": 8.210151380231523e-05, |
| "loss": 0.7767, |
| "step": 664 |
| }, |
| { |
| "epoch": 1.77, |
| "learning_rate": 8.192341941228852e-05, |
| "loss": 0.5954, |
| "step": 665 |
| }, |
| { |
| "epoch": 1.78, |
| "learning_rate": 8.17453250222618e-05, |
| "loss": 0.5569, |
| "step": 666 |
| }, |
| { |
| "epoch": 1.78, |
| "learning_rate": 8.15672306322351e-05, |
| "loss": 0.4631, |
| "step": 667 |
| }, |
| { |
| "epoch": 1.78, |
| "learning_rate": 8.138913624220837e-05, |
| "loss": 0.5095, |
| "step": 668 |
| }, |
| { |
| "epoch": 1.78, |
| "learning_rate": 8.121104185218165e-05, |
| "loss": 0.5156, |
| "step": 669 |
| }, |
| { |
| "epoch": 1.79, |
| "learning_rate": 8.103294746215495e-05, |
| "loss": 0.7886, |
| "step": 670 |
| }, |
| { |
| "epoch": 1.79, |
| "learning_rate": 8.085485307212823e-05, |
| "loss": 0.7179, |
| "step": 671 |
| }, |
| { |
| "epoch": 1.79, |
| "learning_rate": 8.067675868210152e-05, |
| "loss": 0.3063, |
| "step": 672 |
| }, |
| { |
| "epoch": 1.79, |
| "learning_rate": 8.04986642920748e-05, |
| "loss": 0.6799, |
| "step": 673 |
| }, |
| { |
| "epoch": 1.8, |
| "learning_rate": 8.03205699020481e-05, |
| "loss": 0.4453, |
| "step": 674 |
| }, |
| { |
| "epoch": 1.8, |
| "learning_rate": 8.014247551202138e-05, |
| "loss": 0.6371, |
| "step": 675 |
| }, |
| { |
| "epoch": 1.8, |
| "learning_rate": 7.996438112199466e-05, |
| "loss": 0.6388, |
| "step": 676 |
| }, |
| { |
| "epoch": 1.81, |
| "learning_rate": 7.978628673196795e-05, |
| "loss": 0.5928, |
| "step": 677 |
| }, |
| { |
| "epoch": 1.81, |
| "learning_rate": 7.960819234194123e-05, |
| "loss": 0.7315, |
| "step": 678 |
| }, |
| { |
| "epoch": 1.81, |
| "learning_rate": 7.943009795191451e-05, |
| "loss": 0.3025, |
| "step": 679 |
| }, |
| { |
| "epoch": 1.81, |
| "learning_rate": 7.92520035618878e-05, |
| "loss": 0.6875, |
| "step": 680 |
| }, |
| { |
| "epoch": 1.82, |
| "learning_rate": 7.90739091718611e-05, |
| "loss": 0.6911, |
| "step": 681 |
| }, |
| { |
| "epoch": 1.82, |
| "learning_rate": 7.889581478183438e-05, |
| "loss": 0.5739, |
| "step": 682 |
| }, |
| { |
| "epoch": 1.82, |
| "learning_rate": 7.871772039180766e-05, |
| "loss": 0.5978, |
| "step": 683 |
| }, |
| { |
| "epoch": 1.82, |
| "learning_rate": 7.853962600178096e-05, |
| "loss": 0.6027, |
| "step": 684 |
| }, |
| { |
| "epoch": 1.83, |
| "learning_rate": 7.836153161175424e-05, |
| "loss": 0.6756, |
| "step": 685 |
| }, |
| { |
| "epoch": 1.83, |
| "learning_rate": 7.818343722172751e-05, |
| "loss": 0.4417, |
| "step": 686 |
| }, |
| { |
| "epoch": 1.83, |
| "learning_rate": 7.80053428317008e-05, |
| "loss": 0.5446, |
| "step": 687 |
| }, |
| { |
| "epoch": 1.83, |
| "learning_rate": 7.782724844167409e-05, |
| "loss": 0.5871, |
| "step": 688 |
| }, |
| { |
| "epoch": 1.84, |
| "learning_rate": 7.764915405164738e-05, |
| "loss": 0.5769, |
| "step": 689 |
| }, |
| { |
| "epoch": 1.84, |
| "learning_rate": 7.747105966162066e-05, |
| "loss": 0.6795, |
| "step": 690 |
| }, |
| { |
| "epoch": 1.84, |
| "learning_rate": 7.729296527159396e-05, |
| "loss": 0.7031, |
| "step": 691 |
| }, |
| { |
| "epoch": 1.85, |
| "learning_rate": 7.711487088156724e-05, |
| "loss": 0.611, |
| "step": 692 |
| }, |
| { |
| "epoch": 1.85, |
| "learning_rate": 7.693677649154052e-05, |
| "loss": 0.6716, |
| "step": 693 |
| }, |
| { |
| "epoch": 1.85, |
| "learning_rate": 7.675868210151381e-05, |
| "loss": 0.5719, |
| "step": 694 |
| }, |
| { |
| "epoch": 1.85, |
| "learning_rate": 7.658058771148709e-05, |
| "loss": 0.2431, |
| "step": 695 |
| }, |
| { |
| "epoch": 1.86, |
| "learning_rate": 7.640249332146037e-05, |
| "loss": 0.6719, |
| "step": 696 |
| }, |
| { |
| "epoch": 1.86, |
| "learning_rate": 7.622439893143366e-05, |
| "loss": 0.6299, |
| "step": 697 |
| }, |
| { |
| "epoch": 1.86, |
| "learning_rate": 7.604630454140695e-05, |
| "loss": 0.7345, |
| "step": 698 |
| }, |
| { |
| "epoch": 1.86, |
| "learning_rate": 7.586821015138024e-05, |
| "loss": 0.3209, |
| "step": 699 |
| }, |
| { |
| "epoch": 1.87, |
| "learning_rate": 7.569011576135352e-05, |
| "loss": 0.6855, |
| "step": 700 |
| }, |
| { |
| "epoch": 1.87, |
| "learning_rate": 7.55120213713268e-05, |
| "loss": 0.6889, |
| "step": 701 |
| }, |
| { |
| "epoch": 1.87, |
| "learning_rate": 7.53339269813001e-05, |
| "loss": 0.7942, |
| "step": 702 |
| }, |
| { |
| "epoch": 1.87, |
| "learning_rate": 7.515583259127337e-05, |
| "loss": 0.6145, |
| "step": 703 |
| }, |
| { |
| "epoch": 1.88, |
| "learning_rate": 7.497773820124666e-05, |
| "loss": 0.6379, |
| "step": 704 |
| }, |
| { |
| "epoch": 1.88, |
| "learning_rate": 7.479964381121995e-05, |
| "loss": 0.8225, |
| "step": 705 |
| }, |
| { |
| "epoch": 1.88, |
| "learning_rate": 7.462154942119324e-05, |
| "loss": 0.7393, |
| "step": 706 |
| }, |
| { |
| "epoch": 1.89, |
| "learning_rate": 7.444345503116652e-05, |
| "loss": 0.2515, |
| "step": 707 |
| }, |
| { |
| "epoch": 1.89, |
| "learning_rate": 7.42653606411398e-05, |
| "loss": 0.7343, |
| "step": 708 |
| }, |
| { |
| "epoch": 1.89, |
| "learning_rate": 7.40872662511131e-05, |
| "loss": 0.7412, |
| "step": 709 |
| }, |
| { |
| "epoch": 1.89, |
| "learning_rate": 7.390917186108638e-05, |
| "loss": 0.631, |
| "step": 710 |
| }, |
| { |
| "epoch": 1.9, |
| "learning_rate": 7.373107747105967e-05, |
| "loss": 0.6821, |
| "step": 711 |
| }, |
| { |
| "epoch": 1.9, |
| "learning_rate": 7.355298308103295e-05, |
| "loss": 0.6614, |
| "step": 712 |
| }, |
| { |
| "epoch": 1.9, |
| "learning_rate": 7.337488869100623e-05, |
| "loss": 0.7726, |
| "step": 713 |
| }, |
| { |
| "epoch": 1.9, |
| "learning_rate": 7.319679430097952e-05, |
| "loss": 0.3381, |
| "step": 714 |
| }, |
| { |
| "epoch": 1.91, |
| "learning_rate": 7.30186999109528e-05, |
| "loss": 0.5583, |
| "step": 715 |
| }, |
| { |
| "epoch": 1.91, |
| "learning_rate": 7.28406055209261e-05, |
| "loss": 0.6351, |
| "step": 716 |
| }, |
| { |
| "epoch": 1.91, |
| "learning_rate": 7.266251113089938e-05, |
| "loss": 0.639, |
| "step": 717 |
| }, |
| { |
| "epoch": 1.91, |
| "learning_rate": 7.248441674087267e-05, |
| "loss": 0.7555, |
| "step": 718 |
| }, |
| { |
| "epoch": 1.92, |
| "learning_rate": 7.230632235084596e-05, |
| "loss": 0.5269, |
| "step": 719 |
| }, |
| { |
| "epoch": 1.92, |
| "learning_rate": 7.212822796081923e-05, |
| "loss": 0.6324, |
| "step": 720 |
| }, |
| { |
| "epoch": 1.92, |
| "learning_rate": 7.195013357079252e-05, |
| "loss": 0.568, |
| "step": 721 |
| }, |
| { |
| "epoch": 1.93, |
| "learning_rate": 7.17720391807658e-05, |
| "loss": 0.5796, |
| "step": 722 |
| }, |
| { |
| "epoch": 1.93, |
| "learning_rate": 7.15939447907391e-05, |
| "loss": 0.6747, |
| "step": 723 |
| }, |
| { |
| "epoch": 1.93, |
| "learning_rate": 7.141585040071238e-05, |
| "loss": 0.4163, |
| "step": 724 |
| }, |
| { |
| "epoch": 1.93, |
| "learning_rate": 7.123775601068566e-05, |
| "loss": 0.5695, |
| "step": 725 |
| }, |
| { |
| "epoch": 1.94, |
| "learning_rate": 7.105966162065896e-05, |
| "loss": 0.6616, |
| "step": 726 |
| }, |
| { |
| "epoch": 1.94, |
| "learning_rate": 7.088156723063224e-05, |
| "loss": 0.3678, |
| "step": 727 |
| }, |
| { |
| "epoch": 1.94, |
| "learning_rate": 7.070347284060553e-05, |
| "loss": 0.3138, |
| "step": 728 |
| }, |
| { |
| "epoch": 1.94, |
| "learning_rate": 7.052537845057881e-05, |
| "loss": 0.7429, |
| "step": 729 |
| }, |
| { |
| "epoch": 1.95, |
| "learning_rate": 7.03472840605521e-05, |
| "loss": 0.7671, |
| "step": 730 |
| }, |
| { |
| "epoch": 1.95, |
| "learning_rate": 7.016918967052538e-05, |
| "loss": 0.2879, |
| "step": 731 |
| }, |
| { |
| "epoch": 1.95, |
| "learning_rate": 6.999109528049866e-05, |
| "loss": 0.6856, |
| "step": 732 |
| }, |
| { |
| "epoch": 1.95, |
| "learning_rate": 6.981300089047196e-05, |
| "loss": 0.2531, |
| "step": 733 |
| }, |
| { |
| "epoch": 1.96, |
| "learning_rate": 6.963490650044524e-05, |
| "loss": 0.6888, |
| "step": 734 |
| }, |
| { |
| "epoch": 1.96, |
| "learning_rate": 6.945681211041853e-05, |
| "loss": 0.3946, |
| "step": 735 |
| }, |
| { |
| "epoch": 1.96, |
| "learning_rate": 6.927871772039181e-05, |
| "loss": 0.7019, |
| "step": 736 |
| }, |
| { |
| "epoch": 1.97, |
| "learning_rate": 6.91006233303651e-05, |
| "loss": 0.2584, |
| "step": 737 |
| }, |
| { |
| "epoch": 1.97, |
| "learning_rate": 6.892252894033838e-05, |
| "loss": 0.6617, |
| "step": 738 |
| }, |
| { |
| "epoch": 1.97, |
| "learning_rate": 6.874443455031166e-05, |
| "loss": 0.2846, |
| "step": 739 |
| }, |
| { |
| "epoch": 1.97, |
| "learning_rate": 6.856634016028496e-05, |
| "loss": 0.6521, |
| "step": 740 |
| }, |
| { |
| "epoch": 1.98, |
| "learning_rate": 6.838824577025824e-05, |
| "loss": 0.5318, |
| "step": 741 |
| }, |
| { |
| "epoch": 1.98, |
| "learning_rate": 6.821015138023152e-05, |
| "loss": 0.7488, |
| "step": 742 |
| }, |
| { |
| "epoch": 1.98, |
| "learning_rate": 6.803205699020481e-05, |
| "loss": 0.6779, |
| "step": 743 |
| }, |
| { |
| "epoch": 1.98, |
| "learning_rate": 6.78539626001781e-05, |
| "loss": 0.7625, |
| "step": 744 |
| }, |
| { |
| "epoch": 1.99, |
| "learning_rate": 6.767586821015139e-05, |
| "loss": 0.7302, |
| "step": 745 |
| }, |
| { |
| "epoch": 1.99, |
| "learning_rate": 6.749777382012467e-05, |
| "loss": 0.5564, |
| "step": 746 |
| }, |
| { |
| "epoch": 1.99, |
| "learning_rate": 6.731967943009795e-05, |
| "loss": 0.5948, |
| "step": 747 |
| }, |
| { |
| "epoch": 1.99, |
| "learning_rate": 6.714158504007124e-05, |
| "loss": 0.8588, |
| "step": 748 |
| }, |
| { |
| "epoch": 2.0, |
| "learning_rate": 6.696349065004452e-05, |
| "loss": 0.5765, |
| "step": 749 |
| }, |
| { |
| "epoch": 2.0, |
| "learning_rate": 6.67853962600178e-05, |
| "loss": 0.6925, |
| "step": 750 |
| }, |
| { |
| "epoch": 2.0, |
| "learning_rate": 6.66073018699911e-05, |
| "loss": 0.4935, |
| "step": 751 |
| }, |
| { |
| "epoch": 2.01, |
| "learning_rate": 6.642920747996439e-05, |
| "loss": 0.5056, |
| "step": 752 |
| }, |
| { |
| "epoch": 2.01, |
| "learning_rate": 6.625111308993767e-05, |
| "loss": 0.5514, |
| "step": 753 |
| }, |
| { |
| "epoch": 2.01, |
| "learning_rate": 6.607301869991097e-05, |
| "loss": 0.3091, |
| "step": 754 |
| }, |
| { |
| "epoch": 2.01, |
| "learning_rate": 6.589492430988424e-05, |
| "loss": 0.6602, |
| "step": 755 |
| }, |
| { |
| "epoch": 2.02, |
| "learning_rate": 6.571682991985752e-05, |
| "loss": 0.6998, |
| "step": 756 |
| }, |
| { |
| "epoch": 2.02, |
| "learning_rate": 6.55387355298308e-05, |
| "loss": 0.4589, |
| "step": 757 |
| }, |
| { |
| "epoch": 2.02, |
| "learning_rate": 6.53606411398041e-05, |
| "loss": 0.0, |
| "step": 758 |
| }, |
| { |
| "epoch": 2.02, |
| "learning_rate": 6.518254674977738e-05, |
| "loss": 0.0, |
| "step": 759 |
| }, |
| { |
| "epoch": 2.03, |
| "learning_rate": 6.500445235975067e-05, |
| "loss": 0.523, |
| "step": 760 |
| }, |
| { |
| "epoch": 2.03, |
| "learning_rate": 6.482635796972396e-05, |
| "loss": 0.4522, |
| "step": 761 |
| }, |
| { |
| "epoch": 2.03, |
| "learning_rate": 6.464826357969725e-05, |
| "loss": 0.556, |
| "step": 762 |
| }, |
| { |
| "epoch": 2.03, |
| "learning_rate": 6.447016918967053e-05, |
| "loss": 0.5423, |
| "step": 763 |
| }, |
| { |
| "epoch": 2.04, |
| "learning_rate": 6.429207479964381e-05, |
| "loss": 0.4313, |
| "step": 764 |
| }, |
| { |
| "epoch": 2.04, |
| "learning_rate": 6.41139804096171e-05, |
| "loss": 0.6231, |
| "step": 765 |
| }, |
| { |
| "epoch": 2.04, |
| "learning_rate": 6.393588601959038e-05, |
| "loss": 0.4847, |
| "step": 766 |
| }, |
| { |
| "epoch": 2.05, |
| "learning_rate": 6.375779162956367e-05, |
| "loss": 0.5055, |
| "step": 767 |
| }, |
| { |
| "epoch": 2.05, |
| "learning_rate": 6.357969723953696e-05, |
| "loss": 0.4828, |
| "step": 768 |
| }, |
| { |
| "epoch": 2.05, |
| "learning_rate": 6.340160284951025e-05, |
| "loss": 0.5674, |
| "step": 769 |
| }, |
| { |
| "epoch": 2.05, |
| "learning_rate": 6.322350845948353e-05, |
| "loss": 0.5898, |
| "step": 770 |
| }, |
| { |
| "epoch": 2.06, |
| "learning_rate": 6.304541406945681e-05, |
| "loss": 0.3173, |
| "step": 771 |
| }, |
| { |
| "epoch": 2.06, |
| "learning_rate": 6.286731967943011e-05, |
| "loss": 0.6083, |
| "step": 772 |
| }, |
| { |
| "epoch": 2.06, |
| "learning_rate": 6.268922528940338e-05, |
| "loss": 0.2222, |
| "step": 773 |
| }, |
| { |
| "epoch": 2.06, |
| "learning_rate": 6.251113089937666e-05, |
| "loss": 0.4865, |
| "step": 774 |
| }, |
| { |
| "epoch": 2.07, |
| "learning_rate": 6.233303650934996e-05, |
| "loss": 0.5414, |
| "step": 775 |
| }, |
| { |
| "epoch": 2.07, |
| "learning_rate": 6.215494211932324e-05, |
| "loss": 0.6213, |
| "step": 776 |
| }, |
| { |
| "epoch": 2.07, |
| "learning_rate": 6.197684772929653e-05, |
| "loss": 0.4933, |
| "step": 777 |
| }, |
| { |
| "epoch": 2.07, |
| "learning_rate": 6.179875333926982e-05, |
| "loss": 0.4604, |
| "step": 778 |
| }, |
| { |
| "epoch": 2.08, |
| "learning_rate": 6.162065894924311e-05, |
| "loss": 0.5353, |
| "step": 779 |
| }, |
| { |
| "epoch": 2.08, |
| "learning_rate": 6.144256455921639e-05, |
| "loss": 0.5483, |
| "step": 780 |
| }, |
| { |
| "epoch": 2.08, |
| "eval_loss": 0.7184942364692688, |
| "eval_runtime": 3070.429, |
| "eval_samples_per_second": 0.977, |
| "eval_steps_per_second": 0.061, |
| "step": 780 |
| }, |
| { |
| "epoch": 2.08, |
| "learning_rate": 6.126447016918968e-05, |
| "loss": 0.5889, |
| "step": 781 |
| }, |
| { |
| "epoch": 2.09, |
| "learning_rate": 6.108637577916296e-05, |
| "loss": 0.5588, |
| "step": 782 |
| }, |
| { |
| "epoch": 2.09, |
| "learning_rate": 6.090828138913625e-05, |
| "loss": 0.2642, |
| "step": 783 |
| }, |
| { |
| "epoch": 2.09, |
| "learning_rate": 6.0730186999109526e-05, |
| "loss": 0.6108, |
| "step": 784 |
| }, |
| { |
| "epoch": 2.09, |
| "learning_rate": 6.055209260908282e-05, |
| "loss": 0.5766, |
| "step": 785 |
| }, |
| { |
| "epoch": 2.1, |
| "learning_rate": 6.0373998219056106e-05, |
| "loss": 0.5958, |
| "step": 786 |
| }, |
| { |
| "epoch": 2.1, |
| "learning_rate": 6.019590382902939e-05, |
| "loss": 0.4791, |
| "step": 787 |
| }, |
| { |
| "epoch": 2.1, |
| "learning_rate": 6.001780943900267e-05, |
| "loss": 0.6763, |
| "step": 788 |
| }, |
| { |
| "epoch": 2.1, |
| "learning_rate": 5.9839715048975963e-05, |
| "loss": 0.2521, |
| "step": 789 |
| }, |
| { |
| "epoch": 2.11, |
| "learning_rate": 5.966162065894925e-05, |
| "loss": 0.6006, |
| "step": 790 |
| }, |
| { |
| "epoch": 2.11, |
| "learning_rate": 5.948352626892253e-05, |
| "loss": 0.493, |
| "step": 791 |
| }, |
| { |
| "epoch": 2.11, |
| "learning_rate": 5.930543187889582e-05, |
| "loss": 0.4216, |
| "step": 792 |
| }, |
| { |
| "epoch": 2.11, |
| "learning_rate": 5.9127337488869104e-05, |
| "loss": 0.2425, |
| "step": 793 |
| }, |
| { |
| "epoch": 2.12, |
| "learning_rate": 5.894924309884239e-05, |
| "loss": 0.4613, |
| "step": 794 |
| }, |
| { |
| "epoch": 2.12, |
| "learning_rate": 5.877114870881567e-05, |
| "loss": 0.5102, |
| "step": 795 |
| }, |
| { |
| "epoch": 2.12, |
| "learning_rate": 5.859305431878896e-05, |
| "loss": 0.2585, |
| "step": 796 |
| }, |
| { |
| "epoch": 2.13, |
| "learning_rate": 5.8414959928762245e-05, |
| "loss": 0.5613, |
| "step": 797 |
| }, |
| { |
| "epoch": 2.13, |
| "learning_rate": 5.823686553873553e-05, |
| "loss": 0.5372, |
| "step": 798 |
| }, |
| { |
| "epoch": 2.13, |
| "learning_rate": 5.8058771148708826e-05, |
| "loss": 0.2502, |
| "step": 799 |
| }, |
| { |
| "epoch": 2.13, |
| "learning_rate": 5.788067675868211e-05, |
| "loss": 0.5299, |
| "step": 800 |
| }, |
| { |
| "epoch": 2.14, |
| "learning_rate": 5.7702582368655386e-05, |
| "loss": 0.5208, |
| "step": 801 |
| }, |
| { |
| "epoch": 2.14, |
| "learning_rate": 5.752448797862867e-05, |
| "loss": 0.6105, |
| "step": 802 |
| }, |
| { |
| "epoch": 2.14, |
| "learning_rate": 5.7346393588601966e-05, |
| "loss": 0.4572, |
| "step": 803 |
| }, |
| { |
| "epoch": 2.14, |
| "learning_rate": 5.716829919857525e-05, |
| "loss": 0.4496, |
| "step": 804 |
| }, |
| { |
| "epoch": 2.15, |
| "learning_rate": 5.6990204808548533e-05, |
| "loss": 0.6963, |
| "step": 805 |
| }, |
| { |
| "epoch": 2.15, |
| "learning_rate": 5.6812110418521824e-05, |
| "loss": 0.4791, |
| "step": 806 |
| }, |
| { |
| "epoch": 2.15, |
| "learning_rate": 5.663401602849511e-05, |
| "loss": 0.5877, |
| "step": 807 |
| }, |
| { |
| "epoch": 2.15, |
| "learning_rate": 5.645592163846839e-05, |
| "loss": 0.5969, |
| "step": 808 |
| }, |
| { |
| "epoch": 2.16, |
| "learning_rate": 5.6277827248441674e-05, |
| "loss": 0.6696, |
| "step": 809 |
| }, |
| { |
| "epoch": 2.16, |
| "learning_rate": 5.6099732858414964e-05, |
| "loss": 0.6348, |
| "step": 810 |
| }, |
| { |
| "epoch": 2.16, |
| "learning_rate": 5.592163846838825e-05, |
| "loss": 0.5062, |
| "step": 811 |
| }, |
| { |
| "epoch": 2.17, |
| "learning_rate": 5.574354407836153e-05, |
| "loss": 0.4852, |
| "step": 812 |
| }, |
| { |
| "epoch": 2.17, |
| "learning_rate": 5.556544968833483e-05, |
| "loss": 0.2839, |
| "step": 813 |
| }, |
| { |
| "epoch": 2.17, |
| "learning_rate": 5.5387355298308105e-05, |
| "loss": 0.3634, |
| "step": 814 |
| }, |
| { |
| "epoch": 2.17, |
| "learning_rate": 5.520926090828139e-05, |
| "loss": 0.4404, |
| "step": 815 |
| }, |
| { |
| "epoch": 2.18, |
| "learning_rate": 5.503116651825467e-05, |
| "loss": 0.5002, |
| "step": 816 |
| }, |
| { |
| "epoch": 2.18, |
| "learning_rate": 5.485307212822797e-05, |
| "loss": 0.4835, |
| "step": 817 |
| }, |
| { |
| "epoch": 2.18, |
| "learning_rate": 5.467497773820125e-05, |
| "loss": 0.0, |
| "step": 818 |
| }, |
| { |
| "epoch": 2.18, |
| "learning_rate": 5.449688334817453e-05, |
| "loss": 0.5531, |
| "step": 819 |
| }, |
| { |
| "epoch": 2.19, |
| "learning_rate": 5.4318788958147827e-05, |
| "loss": 0.5685, |
| "step": 820 |
| }, |
| { |
| "epoch": 2.19, |
| "learning_rate": 5.414069456812111e-05, |
| "loss": 0.6242, |
| "step": 821 |
| }, |
| { |
| "epoch": 2.19, |
| "learning_rate": 5.3962600178094394e-05, |
| "loss": 0.3086, |
| "step": 822 |
| }, |
| { |
| "epoch": 2.19, |
| "learning_rate": 5.378450578806768e-05, |
| "loss": 0.5342, |
| "step": 823 |
| }, |
| { |
| "epoch": 2.2, |
| "learning_rate": 5.360641139804097e-05, |
| "loss": 0.6173, |
| "step": 824 |
| }, |
| { |
| "epoch": 2.2, |
| "learning_rate": 5.342831700801425e-05, |
| "loss": 0.5492, |
| "step": 825 |
| }, |
| { |
| "epoch": 2.2, |
| "learning_rate": 5.3250222617987534e-05, |
| "loss": 0.7154, |
| "step": 826 |
| }, |
| { |
| "epoch": 2.21, |
| "learning_rate": 5.3072128227960825e-05, |
| "loss": 0.4763, |
| "step": 827 |
| }, |
| { |
| "epoch": 2.21, |
| "learning_rate": 5.289403383793411e-05, |
| "loss": 0.5074, |
| "step": 828 |
| }, |
| { |
| "epoch": 2.21, |
| "learning_rate": 5.271593944790739e-05, |
| "loss": 0.6183, |
| "step": 829 |
| }, |
| { |
| "epoch": 2.21, |
| "learning_rate": 5.2537845057880675e-05, |
| "loss": 0.3336, |
| "step": 830 |
| }, |
| { |
| "epoch": 2.22, |
| "learning_rate": 5.2359750667853965e-05, |
| "loss": 0.3096, |
| "step": 831 |
| }, |
| { |
| "epoch": 2.22, |
| "learning_rate": 5.218165627782725e-05, |
| "loss": 0.5492, |
| "step": 832 |
| }, |
| { |
| "epoch": 2.22, |
| "learning_rate": 5.200356188780053e-05, |
| "loss": 0.5015, |
| "step": 833 |
| }, |
| { |
| "epoch": 2.22, |
| "learning_rate": 5.182546749777383e-05, |
| "loss": 0.2283, |
| "step": 834 |
| }, |
| { |
| "epoch": 2.23, |
| "learning_rate": 5.164737310774711e-05, |
| "loss": 0.6955, |
| "step": 835 |
| }, |
| { |
| "epoch": 2.23, |
| "learning_rate": 5.146927871772039e-05, |
| "loss": 0.5242, |
| "step": 836 |
| }, |
| { |
| "epoch": 2.23, |
| "learning_rate": 5.129118432769367e-05, |
| "loss": 0.5285, |
| "step": 837 |
| }, |
| { |
| "epoch": 2.23, |
| "learning_rate": 5.111308993766697e-05, |
| "loss": 0.6641, |
| "step": 838 |
| }, |
| { |
| "epoch": 2.24, |
| "learning_rate": 5.0934995547640254e-05, |
| "loss": 0.4676, |
| "step": 839 |
| }, |
| { |
| "epoch": 2.24, |
| "learning_rate": 5.075690115761354e-05, |
| "loss": 0.4343, |
| "step": 840 |
| }, |
| { |
| "epoch": 2.24, |
| "learning_rate": 5.057880676758683e-05, |
| "loss": 0.6591, |
| "step": 841 |
| }, |
| { |
| "epoch": 2.25, |
| "learning_rate": 5.040071237756011e-05, |
| "loss": 0.7085, |
| "step": 842 |
| }, |
| { |
| "epoch": 2.25, |
| "learning_rate": 5.0222617987533395e-05, |
| "loss": 0.4633, |
| "step": 843 |
| }, |
| { |
| "epoch": 2.25, |
| "learning_rate": 5.004452359750668e-05, |
| "loss": 0.2814, |
| "step": 844 |
| }, |
| { |
| "epoch": 2.25, |
| "learning_rate": 4.986642920747996e-05, |
| "loss": 0.6218, |
| "step": 845 |
| }, |
| { |
| "epoch": 2.26, |
| "learning_rate": 4.968833481745325e-05, |
| "loss": 0.6177, |
| "step": 846 |
| }, |
| { |
| "epoch": 2.26, |
| "learning_rate": 4.951024042742654e-05, |
| "loss": 0.3615, |
| "step": 847 |
| }, |
| { |
| "epoch": 2.26, |
| "learning_rate": 4.9332146037399826e-05, |
| "loss": 0.4262, |
| "step": 848 |
| }, |
| { |
| "epoch": 2.26, |
| "learning_rate": 4.915405164737311e-05, |
| "loss": 0.5926, |
| "step": 849 |
| }, |
| { |
| "epoch": 2.27, |
| "learning_rate": 4.897595725734639e-05, |
| "loss": 0.5194, |
| "step": 850 |
| }, |
| { |
| "epoch": 2.27, |
| "learning_rate": 4.879786286731968e-05, |
| "loss": 0.7846, |
| "step": 851 |
| }, |
| { |
| "epoch": 2.27, |
| "learning_rate": 4.8619768477292966e-05, |
| "loss": 0.6291, |
| "step": 852 |
| }, |
| { |
| "epoch": 2.27, |
| "learning_rate": 4.844167408726625e-05, |
| "loss": 0.5293, |
| "step": 853 |
| }, |
| { |
| "epoch": 2.28, |
| "learning_rate": 4.826357969723954e-05, |
| "loss": 0.7641, |
| "step": 854 |
| }, |
| { |
| "epoch": 2.28, |
| "learning_rate": 4.8085485307212824e-05, |
| "loss": 0.5638, |
| "step": 855 |
| }, |
| { |
| "epoch": 2.28, |
| "learning_rate": 4.7907390917186114e-05, |
| "loss": 0.4537, |
| "step": 856 |
| }, |
| { |
| "epoch": 2.29, |
| "learning_rate": 4.77292965271594e-05, |
| "loss": 0.5004, |
| "step": 857 |
| }, |
| { |
| "epoch": 2.29, |
| "learning_rate": 4.755120213713268e-05, |
| "loss": 0.5058, |
| "step": 858 |
| }, |
| { |
| "epoch": 2.29, |
| "learning_rate": 4.7373107747105965e-05, |
| "loss": 0.6613, |
| "step": 859 |
| }, |
| { |
| "epoch": 2.29, |
| "learning_rate": 4.7195013357079255e-05, |
| "loss": 0.5536, |
| "step": 860 |
| }, |
| { |
| "epoch": 2.3, |
| "learning_rate": 4.7016918967052545e-05, |
| "loss": 0.2145, |
| "step": 861 |
| }, |
| { |
| "epoch": 2.3, |
| "learning_rate": 4.683882457702582e-05, |
| "loss": 0.6103, |
| "step": 862 |
| }, |
| { |
| "epoch": 2.3, |
| "learning_rate": 4.666073018699911e-05, |
| "loss": 0.2994, |
| "step": 863 |
| }, |
| { |
| "epoch": 2.3, |
| "learning_rate": 4.6482635796972396e-05, |
| "loss": 0.2918, |
| "step": 864 |
| }, |
| { |
| "epoch": 2.31, |
| "learning_rate": 4.6304541406945686e-05, |
| "loss": 0.4479, |
| "step": 865 |
| }, |
| { |
| "epoch": 2.31, |
| "learning_rate": 4.612644701691897e-05, |
| "loss": 0.5615, |
| "step": 866 |
| }, |
| { |
| "epoch": 2.31, |
| "learning_rate": 4.594835262689225e-05, |
| "loss": 0.5611, |
| "step": 867 |
| }, |
| { |
| "epoch": 2.31, |
| "learning_rate": 4.577025823686554e-05, |
| "loss": 0.4812, |
| "step": 868 |
| }, |
| { |
| "epoch": 2.32, |
| "learning_rate": 4.559216384683883e-05, |
| "loss": 0.4333, |
| "step": 869 |
| }, |
| { |
| "epoch": 2.32, |
| "learning_rate": 4.541406945681212e-05, |
| "loss": 0.6103, |
| "step": 870 |
| }, |
| { |
| "epoch": 2.32, |
| "learning_rate": 4.5235975066785394e-05, |
| "loss": 0.4906, |
| "step": 871 |
| }, |
| { |
| "epoch": 2.33, |
| "learning_rate": 4.5057880676758684e-05, |
| "loss": 0.5529, |
| "step": 872 |
| }, |
| { |
| "epoch": 2.33, |
| "learning_rate": 4.487978628673197e-05, |
| "loss": 0.6428, |
| "step": 873 |
| }, |
| { |
| "epoch": 2.33, |
| "learning_rate": 4.470169189670526e-05, |
| "loss": 0.5908, |
| "step": 874 |
| }, |
| { |
| "epoch": 2.33, |
| "learning_rate": 4.452359750667854e-05, |
| "loss": 0.7357, |
| "step": 875 |
| }, |
| { |
| "epoch": 2.34, |
| "learning_rate": 4.4345503116651825e-05, |
| "loss": 0.5283, |
| "step": 876 |
| }, |
| { |
| "epoch": 2.34, |
| "learning_rate": 4.4167408726625115e-05, |
| "loss": 0.5263, |
| "step": 877 |
| }, |
| { |
| "epoch": 2.34, |
| "learning_rate": 4.39893143365984e-05, |
| "loss": 0.5833, |
| "step": 878 |
| }, |
| { |
| "epoch": 2.34, |
| "learning_rate": 4.381121994657169e-05, |
| "loss": 0.4495, |
| "step": 879 |
| }, |
| { |
| "epoch": 2.35, |
| "learning_rate": 4.363312555654497e-05, |
| "loss": 0.5486, |
| "step": 880 |
| }, |
| { |
| "epoch": 2.35, |
| "learning_rate": 4.3455031166518256e-05, |
| "loss": 0.4974, |
| "step": 881 |
| }, |
| { |
| "epoch": 2.35, |
| "learning_rate": 4.3276936776491546e-05, |
| "loss": 0.5267, |
| "step": 882 |
| }, |
| { |
| "epoch": 2.35, |
| "learning_rate": 4.309884238646483e-05, |
| "loss": 0.5541, |
| "step": 883 |
| }, |
| { |
| "epoch": 2.36, |
| "learning_rate": 4.292074799643811e-05, |
| "loss": 0.5155, |
| "step": 884 |
| }, |
| { |
| "epoch": 2.36, |
| "learning_rate": 4.27426536064114e-05, |
| "loss": 0.7037, |
| "step": 885 |
| }, |
| { |
| "epoch": 2.36, |
| "learning_rate": 4.256455921638469e-05, |
| "loss": 0.4534, |
| "step": 886 |
| }, |
| { |
| "epoch": 2.37, |
| "learning_rate": 4.238646482635798e-05, |
| "loss": 0.315, |
| "step": 887 |
| }, |
| { |
| "epoch": 2.37, |
| "learning_rate": 4.2208370436331254e-05, |
| "loss": 0.4836, |
| "step": 888 |
| }, |
| { |
| "epoch": 2.37, |
| "learning_rate": 4.2030276046304544e-05, |
| "loss": 0.5279, |
| "step": 889 |
| }, |
| { |
| "epoch": 2.37, |
| "learning_rate": 4.185218165627783e-05, |
| "loss": 0.4331, |
| "step": 890 |
| }, |
| { |
| "epoch": 2.38, |
| "learning_rate": 4.167408726625112e-05, |
| "loss": 0.2976, |
| "step": 891 |
| }, |
| { |
| "epoch": 2.38, |
| "learning_rate": 4.14959928762244e-05, |
| "loss": 0.4118, |
| "step": 892 |
| }, |
| { |
| "epoch": 2.38, |
| "learning_rate": 4.1317898486197685e-05, |
| "loss": 0.4208, |
| "step": 893 |
| }, |
| { |
| "epoch": 2.38, |
| "learning_rate": 4.1139804096170975e-05, |
| "loss": 0.5812, |
| "step": 894 |
| }, |
| { |
| "epoch": 2.39, |
| "learning_rate": 4.096170970614426e-05, |
| "loss": 0.3178, |
| "step": 895 |
| }, |
| { |
| "epoch": 2.39, |
| "learning_rate": 4.078361531611755e-05, |
| "loss": 0.0, |
| "step": 896 |
| }, |
| { |
| "epoch": 2.39, |
| "learning_rate": 4.0605520926090826e-05, |
| "loss": 0.5466, |
| "step": 897 |
| }, |
| { |
| "epoch": 2.39, |
| "learning_rate": 4.0427426536064116e-05, |
| "loss": 0.5901, |
| "step": 898 |
| }, |
| { |
| "epoch": 2.4, |
| "learning_rate": 4.02493321460374e-05, |
| "loss": 0.4959, |
| "step": 899 |
| }, |
| { |
| "epoch": 2.4, |
| "learning_rate": 4.007123775601069e-05, |
| "loss": 0.4362, |
| "step": 900 |
| }, |
| { |
| "epoch": 2.4, |
| "learning_rate": 3.989314336598397e-05, |
| "loss": 0.651, |
| "step": 901 |
| }, |
| { |
| "epoch": 2.41, |
| "learning_rate": 3.971504897595726e-05, |
| "loss": 0.4988, |
| "step": 902 |
| }, |
| { |
| "epoch": 2.41, |
| "learning_rate": 3.953695458593055e-05, |
| "loss": 0.4345, |
| "step": 903 |
| }, |
| { |
| "epoch": 2.41, |
| "learning_rate": 3.935886019590383e-05, |
| "loss": 0.5049, |
| "step": 904 |
| }, |
| { |
| "epoch": 2.41, |
| "learning_rate": 3.918076580587712e-05, |
| "loss": 0.5505, |
| "step": 905 |
| }, |
| { |
| "epoch": 2.42, |
| "learning_rate": 3.90026714158504e-05, |
| "loss": 0.543, |
| "step": 906 |
| }, |
| { |
| "epoch": 2.42, |
| "learning_rate": 3.882457702582369e-05, |
| "loss": 0.5359, |
| "step": 907 |
| }, |
| { |
| "epoch": 2.42, |
| "learning_rate": 3.864648263579698e-05, |
| "loss": 0.4317, |
| "step": 908 |
| }, |
| { |
| "epoch": 2.42, |
| "learning_rate": 3.846838824577026e-05, |
| "loss": 0.4648, |
| "step": 909 |
| }, |
| { |
| "epoch": 2.43, |
| "learning_rate": 3.8290293855743545e-05, |
| "loss": 0.2252, |
| "step": 910 |
| }, |
| { |
| "epoch": 2.43, |
| "learning_rate": 3.811219946571683e-05, |
| "loss": 0.6439, |
| "step": 911 |
| }, |
| { |
| "epoch": 2.43, |
| "learning_rate": 3.793410507569012e-05, |
| "loss": 0.5382, |
| "step": 912 |
| }, |
| { |
| "epoch": 2.43, |
| "learning_rate": 3.77560106856634e-05, |
| "loss": 0.4928, |
| "step": 913 |
| }, |
| { |
| "epoch": 2.44, |
| "learning_rate": 3.7577916295636686e-05, |
| "loss": 0.4914, |
| "step": 914 |
| }, |
| { |
| "epoch": 2.44, |
| "learning_rate": 3.7399821905609976e-05, |
| "loss": 0.5281, |
| "step": 915 |
| }, |
| { |
| "epoch": 2.44, |
| "learning_rate": 3.722172751558326e-05, |
| "loss": 0.5426, |
| "step": 916 |
| }, |
| { |
| "epoch": 2.45, |
| "learning_rate": 3.704363312555655e-05, |
| "loss": 0.4833, |
| "step": 917 |
| }, |
| { |
| "epoch": 2.45, |
| "learning_rate": 3.6865538735529834e-05, |
| "loss": 0.5073, |
| "step": 918 |
| }, |
| { |
| "epoch": 2.45, |
| "learning_rate": 3.668744434550312e-05, |
| "loss": 0.6785, |
| "step": 919 |
| }, |
| { |
| "epoch": 2.45, |
| "learning_rate": 3.65093499554764e-05, |
| "loss": 0.5446, |
| "step": 920 |
| }, |
| { |
| "epoch": 2.46, |
| "learning_rate": 3.633125556544969e-05, |
| "loss": 0.6372, |
| "step": 921 |
| }, |
| { |
| "epoch": 2.46, |
| "learning_rate": 3.615316117542298e-05, |
| "loss": 0.6244, |
| "step": 922 |
| }, |
| { |
| "epoch": 2.46, |
| "learning_rate": 3.597506678539626e-05, |
| "loss": 0.6706, |
| "step": 923 |
| }, |
| { |
| "epoch": 2.46, |
| "learning_rate": 3.579697239536955e-05, |
| "loss": 0.4218, |
| "step": 924 |
| }, |
| { |
| "epoch": 2.47, |
| "learning_rate": 3.561887800534283e-05, |
| "loss": 0.6145, |
| "step": 925 |
| }, |
| { |
| "epoch": 2.47, |
| "learning_rate": 3.544078361531612e-05, |
| "loss": 0.4184, |
| "step": 926 |
| }, |
| { |
| "epoch": 2.47, |
| "learning_rate": 3.5262689225289405e-05, |
| "loss": 0.5874, |
| "step": 927 |
| }, |
| { |
| "epoch": 2.47, |
| "learning_rate": 3.508459483526269e-05, |
| "loss": 0.5598, |
| "step": 928 |
| }, |
| { |
| "epoch": 2.48, |
| "learning_rate": 3.490650044523598e-05, |
| "loss": 0.5509, |
| "step": 929 |
| }, |
| { |
| "epoch": 2.48, |
| "learning_rate": 3.472840605520926e-05, |
| "loss": 0.456, |
| "step": 930 |
| }, |
| { |
| "epoch": 2.48, |
| "learning_rate": 3.455031166518255e-05, |
| "loss": 0.2864, |
| "step": 931 |
| }, |
| { |
| "epoch": 2.49, |
| "learning_rate": 3.437221727515583e-05, |
| "loss": 0.2309, |
| "step": 932 |
| }, |
| { |
| "epoch": 2.49, |
| "learning_rate": 3.419412288512912e-05, |
| "loss": 0.4592, |
| "step": 933 |
| }, |
| { |
| "epoch": 2.49, |
| "learning_rate": 3.4016028495102404e-05, |
| "loss": 0.6289, |
| "step": 934 |
| }, |
| { |
| "epoch": 2.49, |
| "learning_rate": 3.3837934105075694e-05, |
| "loss": 0.5241, |
| "step": 935 |
| }, |
| { |
| "epoch": 2.5, |
| "learning_rate": 3.365983971504898e-05, |
| "loss": 0.5452, |
| "step": 936 |
| }, |
| { |
| "epoch": 2.5, |
| "eval_loss": 0.7189126014709473, |
| "eval_runtime": 3073.0668, |
| "eval_samples_per_second": 0.976, |
| "eval_steps_per_second": 0.061, |
| "step": 936 |
| }, |
| { |
| "epoch": 2.5, |
| "learning_rate": 3.348174532502226e-05, |
| "loss": 0.4962, |
| "step": 937 |
| }, |
| { |
| "epoch": 2.5, |
| "learning_rate": 3.330365093499555e-05, |
| "loss": 0.4951, |
| "step": 938 |
| }, |
| { |
| "epoch": 2.5, |
| "learning_rate": 3.3125556544968835e-05, |
| "loss": 0.7431, |
| "step": 939 |
| }, |
| { |
| "epoch": 2.51, |
| "learning_rate": 3.294746215494212e-05, |
| "loss": 0.4837, |
| "step": 940 |
| }, |
| { |
| "epoch": 2.51, |
| "learning_rate": 3.27693677649154e-05, |
| "loss": 0.3271, |
| "step": 941 |
| }, |
| { |
| "epoch": 2.51, |
| "learning_rate": 3.259127337488869e-05, |
| "loss": 0.5759, |
| "step": 942 |
| }, |
| { |
| "epoch": 2.51, |
| "learning_rate": 3.241317898486198e-05, |
| "loss": 0.3889, |
| "step": 943 |
| }, |
| { |
| "epoch": 2.52, |
| "learning_rate": 3.2235084594835266e-05, |
| "loss": 0.5234, |
| "step": 944 |
| }, |
| { |
| "epoch": 2.52, |
| "learning_rate": 3.205699020480855e-05, |
| "loss": 0.5873, |
| "step": 945 |
| }, |
| { |
| "epoch": 2.52, |
| "learning_rate": 3.187889581478183e-05, |
| "loss": 0.4699, |
| "step": 946 |
| }, |
| { |
| "epoch": 2.53, |
| "learning_rate": 3.170080142475512e-05, |
| "loss": 0.4407, |
| "step": 947 |
| }, |
| { |
| "epoch": 2.53, |
| "learning_rate": 3.1522707034728406e-05, |
| "loss": 0.4821, |
| "step": 948 |
| }, |
| { |
| "epoch": 2.53, |
| "learning_rate": 3.134461264470169e-05, |
| "loss": 0.4958, |
| "step": 949 |
| }, |
| { |
| "epoch": 2.53, |
| "learning_rate": 3.116651825467498e-05, |
| "loss": 0.5546, |
| "step": 950 |
| }, |
| { |
| "epoch": 2.54, |
| "learning_rate": 3.0988423864648264e-05, |
| "loss": 0.4307, |
| "step": 951 |
| }, |
| { |
| "epoch": 2.54, |
| "learning_rate": 3.0810329474621554e-05, |
| "loss": 0.461, |
| "step": 952 |
| }, |
| { |
| "epoch": 2.54, |
| "learning_rate": 3.063223508459484e-05, |
| "loss": 0.2707, |
| "step": 953 |
| }, |
| { |
| "epoch": 2.54, |
| "learning_rate": 3.0454140694568124e-05, |
| "loss": 0.4925, |
| "step": 954 |
| }, |
| { |
| "epoch": 2.55, |
| "learning_rate": 3.027604630454141e-05, |
| "loss": 0.5388, |
| "step": 955 |
| }, |
| { |
| "epoch": 2.55, |
| "learning_rate": 3.0097951914514695e-05, |
| "loss": 0.0, |
| "step": 956 |
| }, |
| { |
| "epoch": 2.55, |
| "learning_rate": 2.9919857524487982e-05, |
| "loss": 0.4366, |
| "step": 957 |
| }, |
| { |
| "epoch": 2.55, |
| "learning_rate": 2.9741763134461265e-05, |
| "loss": 0.6114, |
| "step": 958 |
| }, |
| { |
| "epoch": 2.56, |
| "learning_rate": 2.9563668744434552e-05, |
| "loss": 0.2699, |
| "step": 959 |
| }, |
| { |
| "epoch": 2.56, |
| "learning_rate": 2.9385574354407836e-05, |
| "loss": 0.5328, |
| "step": 960 |
| }, |
| { |
| "epoch": 2.56, |
| "learning_rate": 2.9207479964381123e-05, |
| "loss": 0.2477, |
| "step": 961 |
| }, |
| { |
| "epoch": 2.57, |
| "learning_rate": 2.9029385574354413e-05, |
| "loss": 0.0, |
| "step": 962 |
| }, |
| { |
| "epoch": 2.57, |
| "learning_rate": 2.8851291184327693e-05, |
| "loss": 0.5676, |
| "step": 963 |
| }, |
| { |
| "epoch": 2.57, |
| "learning_rate": 2.8673196794300983e-05, |
| "loss": 0.4322, |
| "step": 964 |
| }, |
| { |
| "epoch": 2.57, |
| "learning_rate": 2.8495102404274267e-05, |
| "loss": 0.7527, |
| "step": 965 |
| }, |
| { |
| "epoch": 2.58, |
| "learning_rate": 2.8317008014247554e-05, |
| "loss": 0.4511, |
| "step": 966 |
| }, |
| { |
| "epoch": 2.58, |
| "learning_rate": 2.8138913624220837e-05, |
| "loss": 0.4393, |
| "step": 967 |
| }, |
| { |
| "epoch": 2.58, |
| "learning_rate": 2.7960819234194124e-05, |
| "loss": 0.6151, |
| "step": 968 |
| }, |
| { |
| "epoch": 2.58, |
| "learning_rate": 2.7782724844167414e-05, |
| "loss": 0.5711, |
| "step": 969 |
| }, |
| { |
| "epoch": 2.59, |
| "learning_rate": 2.7604630454140694e-05, |
| "loss": 0.562, |
| "step": 970 |
| }, |
| { |
| "epoch": 2.59, |
| "learning_rate": 2.7426536064113985e-05, |
| "loss": 0.4842, |
| "step": 971 |
| }, |
| { |
| "epoch": 2.59, |
| "learning_rate": 2.7248441674087265e-05, |
| "loss": 0.5702, |
| "step": 972 |
| }, |
| { |
| "epoch": 2.59, |
| "learning_rate": 2.7070347284060555e-05, |
| "loss": 0.0, |
| "step": 973 |
| }, |
| { |
| "epoch": 2.6, |
| "learning_rate": 2.689225289403384e-05, |
| "loss": 0.0, |
| "step": 974 |
| }, |
| { |
| "epoch": 2.6, |
| "learning_rate": 2.6714158504007125e-05, |
| "loss": 0.3925, |
| "step": 975 |
| }, |
| { |
| "epoch": 2.6, |
| "learning_rate": 2.6536064113980412e-05, |
| "loss": 0.2426, |
| "step": 976 |
| }, |
| { |
| "epoch": 2.61, |
| "learning_rate": 2.6357969723953696e-05, |
| "loss": 0.5499, |
| "step": 977 |
| }, |
| { |
| "epoch": 2.61, |
| "learning_rate": 2.6179875333926983e-05, |
| "loss": 0.5658, |
| "step": 978 |
| }, |
| { |
| "epoch": 2.61, |
| "learning_rate": 2.6001780943900266e-05, |
| "loss": 0.4884, |
| "step": 979 |
| }, |
| { |
| "epoch": 2.61, |
| "learning_rate": 2.5823686553873557e-05, |
| "loss": 0.6483, |
| "step": 980 |
| }, |
| { |
| "epoch": 2.62, |
| "learning_rate": 2.5645592163846837e-05, |
| "loss": 0.6559, |
| "step": 981 |
| }, |
| { |
| "epoch": 2.62, |
| "learning_rate": 2.5467497773820127e-05, |
| "loss": 0.5516, |
| "step": 982 |
| }, |
| { |
| "epoch": 2.62, |
| "learning_rate": 2.5289403383793414e-05, |
| "loss": 0.4207, |
| "step": 983 |
| }, |
| { |
| "epoch": 2.62, |
| "learning_rate": 2.5111308993766697e-05, |
| "loss": 0.5948, |
| "step": 984 |
| }, |
| { |
| "epoch": 2.63, |
| "learning_rate": 2.493321460373998e-05, |
| "loss": 0.5739, |
| "step": 985 |
| }, |
| { |
| "epoch": 2.63, |
| "learning_rate": 2.475512021371327e-05, |
| "loss": 0.4825, |
| "step": 986 |
| }, |
| { |
| "epoch": 2.63, |
| "learning_rate": 2.4577025823686555e-05, |
| "loss": 0.5103, |
| "step": 987 |
| }, |
| { |
| "epoch": 2.63, |
| "learning_rate": 2.439893143365984e-05, |
| "loss": 0.3186, |
| "step": 988 |
| }, |
| { |
| "epoch": 2.64, |
| "learning_rate": 2.4220837043633125e-05, |
| "loss": 0.4993, |
| "step": 989 |
| }, |
| { |
| "epoch": 2.64, |
| "learning_rate": 2.4042742653606412e-05, |
| "loss": 0.2724, |
| "step": 990 |
| }, |
| { |
| "epoch": 2.64, |
| "learning_rate": 2.38646482635797e-05, |
| "loss": 0.4808, |
| "step": 991 |
| }, |
| { |
| "epoch": 2.65, |
| "learning_rate": 2.3686553873552982e-05, |
| "loss": 0.5503, |
| "step": 992 |
| }, |
| { |
| "epoch": 2.65, |
| "learning_rate": 2.3508459483526273e-05, |
| "loss": 0.6419, |
| "step": 993 |
| }, |
| { |
| "epoch": 2.65, |
| "learning_rate": 2.3330365093499556e-05, |
| "loss": 0.7394, |
| "step": 994 |
| }, |
| { |
| "epoch": 2.65, |
| "learning_rate": 2.3152270703472843e-05, |
| "loss": 0.5237, |
| "step": 995 |
| }, |
| { |
| "epoch": 2.66, |
| "learning_rate": 2.2974176313446126e-05, |
| "loss": 0.448, |
| "step": 996 |
| }, |
| { |
| "epoch": 2.66, |
| "learning_rate": 2.2796081923419413e-05, |
| "loss": 0.6442, |
| "step": 997 |
| }, |
| { |
| "epoch": 2.66, |
| "learning_rate": 2.2617987533392697e-05, |
| "loss": 0.5423, |
| "step": 998 |
| }, |
| { |
| "epoch": 2.66, |
| "learning_rate": 2.2439893143365984e-05, |
| "loss": 0.5845, |
| "step": 999 |
| }, |
| { |
| "epoch": 2.67, |
| "learning_rate": 2.226179875333927e-05, |
| "loss": 0.4551, |
| "step": 1000 |
| }, |
| { |
| "epoch": 2.67, |
| "learning_rate": 2.2083704363312558e-05, |
| "loss": 0.4767, |
| "step": 1001 |
| }, |
| { |
| "epoch": 2.67, |
| "learning_rate": 2.1905609973285844e-05, |
| "loss": 0.6081, |
| "step": 1002 |
| }, |
| { |
| "epoch": 2.67, |
| "learning_rate": 2.1727515583259128e-05, |
| "loss": 0.3134, |
| "step": 1003 |
| }, |
| { |
| "epoch": 2.68, |
| "learning_rate": 2.1549421193232415e-05, |
| "loss": 0.5865, |
| "step": 1004 |
| }, |
| { |
| "epoch": 2.68, |
| "learning_rate": 2.13713268032057e-05, |
| "loss": 0.1635, |
| "step": 1005 |
| }, |
| { |
| "epoch": 2.68, |
| "learning_rate": 2.119323241317899e-05, |
| "loss": 0.5114, |
| "step": 1006 |
| }, |
| { |
| "epoch": 2.69, |
| "learning_rate": 2.1015138023152272e-05, |
| "loss": 0.585, |
| "step": 1007 |
| }, |
| { |
| "epoch": 2.69, |
| "learning_rate": 2.083704363312556e-05, |
| "loss": 0.5333, |
| "step": 1008 |
| }, |
| { |
| "epoch": 2.69, |
| "learning_rate": 2.0658949243098843e-05, |
| "loss": 0.2309, |
| "step": 1009 |
| }, |
| { |
| "epoch": 2.69, |
| "learning_rate": 2.048085485307213e-05, |
| "loss": 0.7229, |
| "step": 1010 |
| }, |
| { |
| "epoch": 2.7, |
| "learning_rate": 2.0302760463045413e-05, |
| "loss": 0.3001, |
| "step": 1011 |
| }, |
| { |
| "epoch": 2.7, |
| "learning_rate": 2.01246660730187e-05, |
| "loss": 0.5959, |
| "step": 1012 |
| }, |
| { |
| "epoch": 2.7, |
| "learning_rate": 1.9946571682991987e-05, |
| "loss": 0.7011, |
| "step": 1013 |
| }, |
| { |
| "epoch": 2.7, |
| "learning_rate": 1.9768477292965274e-05, |
| "loss": 0.5569, |
| "step": 1014 |
| }, |
| { |
| "epoch": 2.71, |
| "learning_rate": 1.959038290293856e-05, |
| "loss": 0.2309, |
| "step": 1015 |
| }, |
| { |
| "epoch": 2.71, |
| "learning_rate": 1.9412288512911844e-05, |
| "loss": 0.1953, |
| "step": 1016 |
| }, |
| { |
| "epoch": 2.71, |
| "learning_rate": 1.923419412288513e-05, |
| "loss": 0.6111, |
| "step": 1017 |
| }, |
| { |
| "epoch": 2.71, |
| "learning_rate": 1.9056099732858414e-05, |
| "loss": 0.53, |
| "step": 1018 |
| }, |
| { |
| "epoch": 2.72, |
| "learning_rate": 1.88780053428317e-05, |
| "loss": 0.4797, |
| "step": 1019 |
| }, |
| { |
| "epoch": 2.72, |
| "learning_rate": 1.8699910952804988e-05, |
| "loss": 0.5537, |
| "step": 1020 |
| }, |
| { |
| "epoch": 2.72, |
| "learning_rate": 1.8521816562778275e-05, |
| "loss": 0.5335, |
| "step": 1021 |
| }, |
| { |
| "epoch": 2.73, |
| "learning_rate": 1.834372217275156e-05, |
| "loss": 0.2199, |
| "step": 1022 |
| }, |
| { |
| "epoch": 2.73, |
| "learning_rate": 1.8165627782724845e-05, |
| "loss": 0.205, |
| "step": 1023 |
| }, |
| { |
| "epoch": 2.73, |
| "learning_rate": 1.798753339269813e-05, |
| "loss": 0.5601, |
| "step": 1024 |
| }, |
| { |
| "epoch": 2.73, |
| "learning_rate": 1.7809439002671416e-05, |
| "loss": 0.4996, |
| "step": 1025 |
| }, |
| { |
| "epoch": 2.74, |
| "learning_rate": 1.7631344612644703e-05, |
| "loss": 0.5524, |
| "step": 1026 |
| }, |
| { |
| "epoch": 2.74, |
| "learning_rate": 1.745325022261799e-05, |
| "loss": 0.3372, |
| "step": 1027 |
| }, |
| { |
| "epoch": 2.74, |
| "learning_rate": 1.7275155832591277e-05, |
| "loss": 0.5229, |
| "step": 1028 |
| }, |
| { |
| "epoch": 2.74, |
| "learning_rate": 1.709706144256456e-05, |
| "loss": 0.4927, |
| "step": 1029 |
| }, |
| { |
| "epoch": 2.75, |
| "learning_rate": 1.6918967052537847e-05, |
| "loss": 0.4765, |
| "step": 1030 |
| }, |
| { |
| "epoch": 2.75, |
| "learning_rate": 1.674087266251113e-05, |
| "loss": 0.5663, |
| "step": 1031 |
| }, |
| { |
| "epoch": 2.75, |
| "learning_rate": 1.6562778272484417e-05, |
| "loss": 0.6083, |
| "step": 1032 |
| }, |
| { |
| "epoch": 2.75, |
| "learning_rate": 1.63846838824577e-05, |
| "loss": 0.4604, |
| "step": 1033 |
| }, |
| { |
| "epoch": 2.76, |
| "learning_rate": 1.620658949243099e-05, |
| "loss": 0.4415, |
| "step": 1034 |
| }, |
| { |
| "epoch": 2.76, |
| "learning_rate": 1.6028495102404275e-05, |
| "loss": 0.1771, |
| "step": 1035 |
| }, |
| { |
| "epoch": 2.76, |
| "learning_rate": 1.585040071237756e-05, |
| "loss": 0.2848, |
| "step": 1036 |
| }, |
| { |
| "epoch": 2.77, |
| "learning_rate": 1.5672306322350845e-05, |
| "loss": 0.6078, |
| "step": 1037 |
| }, |
| { |
| "epoch": 2.77, |
| "learning_rate": 1.5494211932324132e-05, |
| "loss": 0.5374, |
| "step": 1038 |
| }, |
| { |
| "epoch": 2.77, |
| "learning_rate": 1.531611754229742e-05, |
| "loss": 0.5114, |
| "step": 1039 |
| }, |
| { |
| "epoch": 2.77, |
| "learning_rate": 1.5138023152270706e-05, |
| "loss": 0.5361, |
| "step": 1040 |
| }, |
| { |
| "epoch": 2.78, |
| "learning_rate": 1.4959928762243991e-05, |
| "loss": 0.3074, |
| "step": 1041 |
| }, |
| { |
| "epoch": 2.78, |
| "learning_rate": 1.4781834372217276e-05, |
| "loss": 0.5281, |
| "step": 1042 |
| }, |
| { |
| "epoch": 2.78, |
| "learning_rate": 1.4603739982190561e-05, |
| "loss": 0.2628, |
| "step": 1043 |
| }, |
| { |
| "epoch": 2.78, |
| "learning_rate": 1.4425645592163846e-05, |
| "loss": 0.6858, |
| "step": 1044 |
| }, |
| { |
| "epoch": 2.79, |
| "learning_rate": 1.4247551202137133e-05, |
| "loss": 0.5042, |
| "step": 1045 |
| }, |
| { |
| "epoch": 2.79, |
| "learning_rate": 1.4069456812110419e-05, |
| "loss": 0.539, |
| "step": 1046 |
| }, |
| { |
| "epoch": 2.79, |
| "learning_rate": 1.3891362422083707e-05, |
| "loss": 0.4862, |
| "step": 1047 |
| }, |
| { |
| "epoch": 2.79, |
| "learning_rate": 1.3713268032056992e-05, |
| "loss": 0.2817, |
| "step": 1048 |
| }, |
| { |
| "epoch": 2.8, |
| "learning_rate": 1.3535173642030278e-05, |
| "loss": 0.6027, |
| "step": 1049 |
| }, |
| { |
| "epoch": 2.8, |
| "learning_rate": 1.3357079252003563e-05, |
| "loss": 0.4997, |
| "step": 1050 |
| }, |
| { |
| "epoch": 2.8, |
| "learning_rate": 1.3178984861976848e-05, |
| "loss": 0.6057, |
| "step": 1051 |
| }, |
| { |
| "epoch": 2.81, |
| "learning_rate": 1.3000890471950133e-05, |
| "loss": 0.6287, |
| "step": 1052 |
| }, |
| { |
| "epoch": 2.81, |
| "learning_rate": 1.2822796081923418e-05, |
| "loss": 0.5216, |
| "step": 1053 |
| }, |
| { |
| "epoch": 2.81, |
| "learning_rate": 1.2644701691896707e-05, |
| "loss": 0.5183, |
| "step": 1054 |
| }, |
| { |
| "epoch": 2.81, |
| "learning_rate": 1.246660730186999e-05, |
| "loss": 0.5827, |
| "step": 1055 |
| }, |
| { |
| "epoch": 2.82, |
| "learning_rate": 1.2288512911843277e-05, |
| "loss": 0.7063, |
| "step": 1056 |
| }, |
| { |
| "epoch": 2.82, |
| "learning_rate": 1.2110418521816562e-05, |
| "loss": 0.5105, |
| "step": 1057 |
| }, |
| { |
| "epoch": 2.82, |
| "learning_rate": 1.193232413178985e-05, |
| "loss": 0.5323, |
| "step": 1058 |
| }, |
| { |
| "epoch": 2.82, |
| "learning_rate": 1.1754229741763136e-05, |
| "loss": 0.6138, |
| "step": 1059 |
| }, |
| { |
| "epoch": 2.83, |
| "learning_rate": 1.1576135351736421e-05, |
| "loss": 0.3587, |
| "step": 1060 |
| }, |
| { |
| "epoch": 2.83, |
| "learning_rate": 1.1398040961709707e-05, |
| "loss": 0.3221, |
| "step": 1061 |
| }, |
| { |
| "epoch": 2.83, |
| "learning_rate": 1.1219946571682992e-05, |
| "loss": 0.545, |
| "step": 1062 |
| }, |
| { |
| "epoch": 2.83, |
| "learning_rate": 1.1041852181656279e-05, |
| "loss": 0.5451, |
| "step": 1063 |
| }, |
| { |
| "epoch": 2.84, |
| "learning_rate": 1.0863757791629564e-05, |
| "loss": 0.4481, |
| "step": 1064 |
| }, |
| { |
| "epoch": 2.84, |
| "learning_rate": 1.068566340160285e-05, |
| "loss": 0.5007, |
| "step": 1065 |
| }, |
| { |
| "epoch": 2.84, |
| "learning_rate": 1.0507569011576136e-05, |
| "loss": 0.7057, |
| "step": 1066 |
| }, |
| { |
| "epoch": 2.85, |
| "learning_rate": 1.0329474621549421e-05, |
| "loss": 0.7881, |
| "step": 1067 |
| }, |
| { |
| "epoch": 2.85, |
| "learning_rate": 1.0151380231522706e-05, |
| "loss": 0.4972, |
| "step": 1068 |
| }, |
| { |
| "epoch": 2.85, |
| "learning_rate": 9.973285841495993e-06, |
| "loss": 0.6574, |
| "step": 1069 |
| }, |
| { |
| "epoch": 2.85, |
| "learning_rate": 9.79519145146928e-06, |
| "loss": 0.6185, |
| "step": 1070 |
| }, |
| { |
| "epoch": 2.86, |
| "learning_rate": 9.617097061442565e-06, |
| "loss": 0.4693, |
| "step": 1071 |
| }, |
| { |
| "epoch": 2.86, |
| "learning_rate": 9.43900267141585e-06, |
| "loss": 0.5833, |
| "step": 1072 |
| }, |
| { |
| "epoch": 2.86, |
| "learning_rate": 9.260908281389138e-06, |
| "loss": 0.5996, |
| "step": 1073 |
| }, |
| { |
| "epoch": 2.86, |
| "learning_rate": 9.082813891362423e-06, |
| "loss": 0.7077, |
| "step": 1074 |
| }, |
| { |
| "epoch": 2.87, |
| "learning_rate": 8.904719501335708e-06, |
| "loss": 0.5141, |
| "step": 1075 |
| }, |
| { |
| "epoch": 2.87, |
| "learning_rate": 8.726625111308995e-06, |
| "loss": 0.2115, |
| "step": 1076 |
| }, |
| { |
| "epoch": 2.87, |
| "learning_rate": 8.54853072128228e-06, |
| "loss": 0.6349, |
| "step": 1077 |
| }, |
| { |
| "epoch": 2.87, |
| "learning_rate": 8.370436331255565e-06, |
| "loss": 0.0, |
| "step": 1078 |
| }, |
| { |
| "epoch": 2.88, |
| "learning_rate": 8.19234194122885e-06, |
| "loss": 0.5415, |
| "step": 1079 |
| }, |
| { |
| "epoch": 2.88, |
| "learning_rate": 8.014247551202137e-06, |
| "loss": 0.5833, |
| "step": 1080 |
| }, |
| { |
| "epoch": 2.88, |
| "learning_rate": 7.836153161175422e-06, |
| "loss": 0.6318, |
| "step": 1081 |
| }, |
| { |
| "epoch": 2.89, |
| "learning_rate": 7.65805877114871e-06, |
| "loss": 0.5398, |
| "step": 1082 |
| }, |
| { |
| "epoch": 2.89, |
| "learning_rate": 7.4799643811219954e-06, |
| "loss": 0.8625, |
| "step": 1083 |
| }, |
| { |
| "epoch": 2.89, |
| "learning_rate": 7.301869991095281e-06, |
| "loss": 0.4004, |
| "step": 1084 |
| }, |
| { |
| "epoch": 2.89, |
| "learning_rate": 7.123775601068567e-06, |
| "loss": 0.636, |
| "step": 1085 |
| }, |
| { |
| "epoch": 2.9, |
| "learning_rate": 6.9456812110418536e-06, |
| "loss": 0.5235, |
| "step": 1086 |
| }, |
| { |
| "epoch": 2.9, |
| "learning_rate": 6.767586821015139e-06, |
| "loss": 0.4901, |
| "step": 1087 |
| }, |
| { |
| "epoch": 2.9, |
| "learning_rate": 6.589492430988424e-06, |
| "loss": 0.2543, |
| "step": 1088 |
| }, |
| { |
| "epoch": 2.9, |
| "learning_rate": 6.411398040961709e-06, |
| "loss": 0.4768, |
| "step": 1089 |
| }, |
| { |
| "epoch": 2.91, |
| "learning_rate": 6.233303650934995e-06, |
| "loss": 0.2038, |
| "step": 1090 |
| }, |
| { |
| "epoch": 2.91, |
| "learning_rate": 6.055209260908281e-06, |
| "loss": 0.3125, |
| "step": 1091 |
| }, |
| { |
| "epoch": 2.91, |
| "learning_rate": 5.877114870881568e-06, |
| "loss": 0.4375, |
| "step": 1092 |
| }, |
| { |
| "epoch": 2.91, |
| "eval_loss": 0.7176805138587952, |
| "eval_runtime": 3073.495, |
| "eval_samples_per_second": 0.976, |
| "eval_steps_per_second": 0.061, |
| "step": 1092 |
| } |
| ], |
| "logging_steps": 1, |
| "max_steps": 1125, |
| "num_train_epochs": 3, |
| "save_steps": 156, |
| "total_flos": 9.257195873540506e+17, |
| "trial_name": null, |
| "trial_params": null |
| } |
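
The object above has the shape of a Hugging Face `transformers` `TrainerState` dump: the top-level fields (`log_history`, `max_steps`, `num_train_epochs`, `total_flos`, and so on) match what `Trainer` writes to `trainer_state.json` inside each checkpoint directory. Below is a minimal inspection sketch, assuming the JSON has been saved under that conventional filename; the path itself is an assumption, as it does not appear in the log.

```python
# Minimal sketch for inspecting a Trainer state file like the one above.
# Assumes it is saved as "trainer_state.json" (the name transformers'
# Trainer uses in checkpoint directories); the filename is an assumption.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes per-step training entries (with "loss") and
# evaluation entries (with "eval_loss"); split them apart.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]

print(f"logged train steps: {len(steps)} (last = {steps[-1]} of {state['max_steps']})")
print(f"final train loss: {losses[-1]:.4f}")
if eval_logs:
    last_eval = eval_logs[-1]
    print(f"last eval loss: {last_eval['eval_loss']:.4f} at step {last_eval['step']}")

# Entries whose logged loss is exactly 0.0 (steps 973, 974, and 1078 in
# this log) are often worth a closer look: an exact zero can indicate a
# skipped or empty batch rather than a genuinely perfect fit.
zero_steps = [e["step"] for e in train_logs if e["loss"] == 0.0]
print(f"steps with loss == 0.0: {zero_steps}")
```

For what it's worth, the `learning_rate` values in this section are consistent with a plain linear decay, lr(step) ≈ 2e-4 · (1125 − step) / 1123 with `max_steps` = 1125 taken from the footer; for example, at step 1092 this gives 2e-4 · 33 / 1123 ≈ 5.877e-06, matching the last logged value.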