{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7380073800738007,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.778737860918045,
      "epoch": 0.03690036900369004,
      "grad_norm": 0.06591796875,
      "learning_rate": 0.0001999897109785537,
      "loss": 1.9253872680664061,
      "mean_token_accuracy": 0.5836515012383461,
      "num_tokens": 158319.0,
      "step": 25
    },
    {
      "entropy": 1.3310806779563427,
      "epoch": 0.07380073800738007,
      "grad_norm": 0.10546875,
      "learning_rate": 0.00019910503467261722,
      "loss": 1.2929591369628906,
      "mean_token_accuracy": 0.6938765767216682,
      "num_tokens": 315084.0,
      "step": 50
    },
    {
      "entropy": 0.9595038790814578,
      "epoch": 0.11070110701107011,
      "grad_norm": 0.1552734375,
      "learning_rate": 0.00019680577845157155,
      "loss": 0.9015888214111328,
      "mean_token_accuracy": 0.7816851404309273,
      "num_tokens": 470867.0,
      "step": 75
    },
    {
      "entropy": 0.8337875098735094,
      "epoch": 0.14760147601476015,
      "grad_norm": 0.12255859375,
      "learning_rate": 0.0001931247608465915,
      "loss": 0.7552251434326172,
      "mean_token_accuracy": 0.812552462220192,
      "num_tokens": 626254.0,
      "step": 100
    },
    {
      "entropy": 0.6474238294456154,
      "epoch": 0.18450184501845018,
      "grad_norm": 0.189453125,
      "learning_rate": 0.0001881145230185612,
      "loss": 0.5701670455932617,
      "mean_token_accuracy": 0.8557327410578728,
      "num_tokens": 786488.0,
      "step": 125
    },
    {
      "entropy": 0.5202154207415879,
      "epoch": 0.22140221402214022,
      "grad_norm": 0.1259765625,
      "learning_rate": 0.00018184657880958635,
      "loss": 0.46115348815917967,
      "mean_token_accuracy": 0.8857239605486393,
      "num_tokens": 943912.0,
      "step": 150
    },
    {
      "entropy": 0.4175653001386672,
      "epoch": 0.25830258302583026,
      "grad_norm": 0.09033203125,
      "learning_rate": 0.00017441039398713608,
      "loss": 0.3705097579956055,
      "mean_token_accuracy": 0.9097741657495498,
      "num_tokens": 1101961.0,
      "step": 175
    },
    {
      "entropy": 0.3828558912500739,
      "epoch": 0.2952029520295203,
      "grad_norm": 0.11181640625,
      "learning_rate": 0.0001659121092506171,
      "loss": 0.3168667984008789,
      "mean_token_accuracy": 0.9167497155070304,
      "num_tokens": 1258551.0,
      "step": 200
    },
    {
      "entropy": 0.3276692613959312,
      "epoch": 0.33210332103321033,
      "grad_norm": 0.09228515625,
      "learning_rate": 0.00015647302522759962,
      "loss": 0.26206918716430666,
      "mean_token_accuracy": 0.9325725418329239,
      "num_tokens": 1414182.0,
      "step": 225
    },
    {
      "entropy": 0.2656678088847548,
      "epoch": 0.36900369003690037,
      "grad_norm": 0.0986328125,
      "learning_rate": 0.00014622787108416584,
      "loss": 0.2154819107055664,
      "mean_token_accuracy": 0.9447659783065319,
      "num_tokens": 1571401.0,
      "step": 250
    },
    {
      "entropy": 0.23459753211122006,
      "epoch": 0.4059040590405904,
      "grad_norm": 0.09375,
      "learning_rate": 0.00013532288146244446,
      "loss": 0.18326162338256835,
      "mean_token_accuracy": 0.9511218988895416,
      "num_tokens": 1727812.0,
      "step": 275
    },
    {
      "entropy": 0.17023452829569577,
      "epoch": 0.44280442804428044,
      "grad_norm": 0.1201171875,
      "learning_rate": 0.00012391370919424485,
      "loss": 0.12261626243591309,
      "mean_token_accuracy": 0.9681580965220928,
      "num_tokens": 1885993.0,
      "step": 300
    },
    {
      "entropy": 0.17609536536969245,
      "epoch": 0.4797047970479705,
      "grad_norm": 0.1162109375,
      "learning_rate": 0.00011216320358376157,
      "loss": 0.137310152053833,
      "mean_token_accuracy": 0.9655524456501007,
      "num_tokens": 2040863.0,
      "step": 325
    },
    {
      "entropy": 0.12500815264414997,
      "epoch": 0.5166051660516605,
      "grad_norm": 0.1328125,
      "learning_rate": 0.00010023908597112514,
      "loss": 0.08844621658325195,
      "mean_token_accuracy": 0.9776459574699402,
      "num_tokens": 2193836.0,
      "step": 350
    },
    {
      "entropy": 0.13451697141863406,
      "epoch": 0.5535055350553506,
      "grad_norm": 0.126953125,
      "learning_rate": 8.831155575474102e-05,
      "loss": 0.09469686508178711,
      "mean_token_accuracy": 0.9756594082713127,
      "num_tokens": 2352735.0,
      "step": 375
    },
    {
      "entropy": 0.11615390982478857,
      "epoch": 0.5904059040590406,
      "grad_norm": 0.08251953125,
      "learning_rate": 7.655086104295904e-05,
      "loss": 0.08285388946533204,
      "mean_token_accuracy": 0.9785897681117057,
      "num_tokens": 2506252.0,
      "step": 400
    },
    {
      "entropy": 0.09932436655741185,
      "epoch": 0.6273062730627307,
      "grad_norm": 0.0247802734375,
      "learning_rate": 6.512486861047911e-05,
      "loss": 0.07087704181671142,
      "mean_token_accuracy": 0.9825253394246102,
      "num_tokens": 2661297.0,
      "step": 425
    },
    {
      "entropy": 0.07707942618988455,
      "epoch": 0.6642066420664207,
      "grad_norm": 0.1318359375,
      "learning_rate": 5.419666784482398e-05,
      "loss": 0.05760700225830078,
      "mean_token_accuracy": 0.9852602830529213,
      "num_tokens": 2816646.0,
      "step": 450
    },
    {
      "entropy": 0.07893491809722036,
      "epoch": 0.7011070110701108,
      "grad_norm": 0.06591796875,
      "learning_rate": 4.3922242883050224e-05,
      "loss": 0.048433661460876465,
      "mean_token_accuracy": 0.9880265095829963,
      "num_tokens": 2973546.0,
      "step": 475
    },
    {
      "entropy": 0.07320529426448047,
      "epoch": 0.7380073800738007,
      "grad_norm": 0.03271484375,
      "learning_rate": 3.444824616555346e-05,
      "loss": 0.04846723079681396,
      "mean_token_accuracy": 0.9885234755277633,
      "num_tokens": 3129376.0,
      "step": 500
    }
  ],
  "logging_steps": 25,
  "max_steps": 678,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3351775573537587e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}