Farouk committed on
Commit b03a428 · 1 Parent(s): c0f9f39

Training in progress, step 8600

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:dfed7c94376b57df7885b363b9837f37bbf095ea22f8b365172facef12be4769
3
  size 319977229
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0cd76d4c99e5f1860f821f15f0e61f5347edaf2bed6bfab9e2c27da23f62ccc1
3
  size 319977229
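Note: `adapter_model.bin` is tracked with Git LFS, so the diff above only swaps the pointer (sha256 object id and byte size); the weights themselves are stored out of band. A minimal sketch, assuming the binary has already been downloaded to a local path (the path below is a hypothetical placeholder), of checking a local copy against the new pointer:

```python
# Sketch only: verify a locally downloaded adapter_model.bin against the
# Git LFS pointer above (oid sha256 + size). The local path is hypothetical.
import hashlib
import os

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so a ~320 MB checkpoint never sits fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Values copied from the new LFS pointer in this commit.
EXPECTED_OID = "0cd76d4c99e5f1860f821f15f0e61f5347edaf2bed6bfab9e2c27da23f62ccc1"
EXPECTED_SIZE = 319977229

local_path = "adapter_model.bin"  # hypothetical download location
assert os.path.getsize(local_path) == EXPECTED_SIZE, "size does not match LFS pointer"
assert sha256_of(local_path) == EXPECTED_OID, "sha256 does not match LFS pointer"
```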
checkpoint-5200/adapter_model/adapter_model/README.md CHANGED
@@ -169,6 +169,17 @@ The following `bitsandbytes` quantization config was used during training:
169
  - bnb_4bit_use_double_quant: True
170
  - bnb_4bit_compute_dtype: bfloat16
171
 
172
  The following `bitsandbytes` quantization config was used during training:
173
  - load_in_8bit: False
174
  - load_in_4bit: True
@@ -196,5 +207,6 @@ The following `bitsandbytes` quantization config was used during training:
196
  - PEFT 0.4.0
197
  - PEFT 0.4.0
198
  - PEFT 0.4.0
 
199
 
200
  - PEFT 0.4.0
 
169
  - bnb_4bit_use_double_quant: True
170
  - bnb_4bit_compute_dtype: bfloat16
171
 
172
+ The following `bitsandbytes` quantization config was used during training:
173
+ - load_in_8bit: False
174
+ - load_in_4bit: True
175
+ - llm_int8_threshold: 6.0
176
+ - llm_int8_skip_modules: None
177
+ - llm_int8_enable_fp32_cpu_offload: False
178
+ - llm_int8_has_fp16_weight: False
179
+ - bnb_4bit_quant_type: nf4
180
+ - bnb_4bit_use_double_quant: True
181
+ - bnb_4bit_compute_dtype: bfloat16
182
+
183
  The following `bitsandbytes` quantization config was used during training:
184
  - load_in_8bit: False
185
  - load_in_4bit: True
 
207
  - PEFT 0.4.0
208
  - PEFT 0.4.0
209
  - PEFT 0.4.0
210
+ - PEFT 0.4.0
211
 
212
  - PEFT 0.4.0
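The quantization fields recorded in the README diff above map one-to-one onto a `transformers` `BitsAndBytesConfig`. A minimal sketch of reconstructing the same 4-bit NF4 setup (the base model id is a placeholder; this commit does not name it):

```python
# Sketch only: rebuild the bitsandbytes quantization config listed in the README.
# The base model id is a placeholder, not taken from this commit.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Loading a base model with this config quantizes its weights to 4-bit NF4;
# the adapter weights in adapter_model.bin are then attached on top via PEFT.
model = AutoModelForCausalLM.from_pretrained(
    "base-model-id",  # placeholder
    quantization_config=bnb_config,
    device_map="auto",
)
```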
checkpoint-5200/adapter_model/adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:dcd2ce7263d0adeeaba023d36319d2ddb2c9340086da2260873038894b516d20
3
  size 319977229
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfed7c94376b57df7885b363b9837f37bbf095ea22f8b365172facef12be4769
3
  size 319977229
{checkpoint-6600 β†’ checkpoint-8600}/README.md RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8600}/adapter_config.json RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8600}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:49c1c2d4d567a9d742911d1bb7b4a45608cd386734c373b626d078959f300b2c
3
  size 319977229
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0cd76d4c99e5f1860f821f15f0e61f5347edaf2bed6bfab9e2c27da23f62ccc1
3
  size 319977229
{checkpoint-6600 β†’ checkpoint-8600}/added_tokens.json RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8600}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b96baa36ed41f8e42983fc446b14b4ed7c3a331e2430e79b2d2d7a854b893d05
3
  size 1279539973
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec8127d784283d741c5c5f468e50194dd89dbb6fd79bce2d143fd8018e0c72cd
3
  size 1279539973
{checkpoint-6600 β†’ checkpoint-8600}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:db85f493d8c2c77c44ec983837410d25a2ef9cf3a310af38f887d3dde9e625c6
3
  size 14511
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e63b14e0361420ee21294172b2437b20a3c1c2172bd7b094af8942a82878e7bc
3
  size 14511
{checkpoint-6600 β†’ checkpoint-8600}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:aa19c433c8c029403e57118df2ab52631b3fc535294c01cab201bdeb198ed0f4
3
  size 627
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58bb158dc8e2249ddde152cf1ccc6f5a31e448a10bd61b537efe520ccb7eb273
3
  size 627
{checkpoint-6600 β†’ checkpoint-8600}/special_tokens_map.json RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8600}/tokenizer.model RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8600}/tokenizer_config.json RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8600}/trainer_state.json RENAMED
@@ -1,8 +1,8 @@
1
  {
2
  "best_metric": 0.4120824635028839,
3
  "best_model_checkpoint": "experts/expert-10/checkpoint-5200",
4
- "epoch": 2.4908010189640533,
5
- "global_step": 6600,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
@@ -6309,11 +6309,1921 @@
6309
  "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
6310
  "mmlu_loss": 1.6874822327969279,
6311
  "step": 6600
6312
  }
6313
  ],
6314
  "max_steps": 10000,
6315
  "num_train_epochs": 4,
6316
- "total_flos": 4.71919989971583e+17,
6317
  "trial_name": null,
6318
  "trial_params": null
6319
  }
 
1
  {
2
  "best_metric": 0.4120824635028839,
3
  "best_model_checkpoint": "experts/expert-10/checkpoint-5200",
4
+ "epoch": 3.245589206528918,
5
+ "global_step": 8600,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
 
6309
  "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
6310
  "mmlu_loss": 1.6874822327969279,
6311
  "step": 6600
6312
+ },
6313
+ {
6314
+ "epoch": 2.49,
6315
+ "learning_rate": 0.0002,
6316
+ "loss": 0.2808,
6317
+ "step": 6610
6318
+ },
6319
+ {
6320
+ "epoch": 2.5,
6321
+ "learning_rate": 0.0002,
6322
+ "loss": 0.2967,
6323
+ "step": 6620
6324
+ },
6325
+ {
6326
+ "epoch": 2.5,
6327
+ "learning_rate": 0.0002,
6328
+ "loss": 0.2778,
6329
+ "step": 6630
6330
+ },
6331
+ {
6332
+ "epoch": 2.51,
6333
+ "learning_rate": 0.0002,
6334
+ "loss": 0.2893,
6335
+ "step": 6640
6336
+ },
6337
+ {
6338
+ "epoch": 2.51,
6339
+ "learning_rate": 0.0002,
6340
+ "loss": 0.316,
6341
+ "step": 6650
6342
+ },
6343
+ {
6344
+ "epoch": 2.51,
6345
+ "learning_rate": 0.0002,
6346
+ "loss": 0.3247,
6347
+ "step": 6660
6348
+ },
6349
+ {
6350
+ "epoch": 2.52,
6351
+ "learning_rate": 0.0002,
6352
+ "loss": 0.3225,
6353
+ "step": 6670
6354
+ },
6355
+ {
6356
+ "epoch": 2.52,
6357
+ "learning_rate": 0.0002,
6358
+ "loss": 0.3463,
6359
+ "step": 6680
6360
+ },
6361
+ {
6362
+ "epoch": 2.52,
6363
+ "learning_rate": 0.0002,
6364
+ "loss": 0.2973,
6365
+ "step": 6690
6366
+ },
6367
+ {
6368
+ "epoch": 2.53,
6369
+ "learning_rate": 0.0002,
6370
+ "loss": 0.3279,
6371
+ "step": 6700
6372
+ },
6373
+ {
6374
+ "epoch": 2.53,
6375
+ "learning_rate": 0.0002,
6376
+ "loss": 0.328,
6377
+ "step": 6710
6378
+ },
6379
+ {
6380
+ "epoch": 2.54,
6381
+ "learning_rate": 0.0002,
6382
+ "loss": 0.2795,
6383
+ "step": 6720
6384
+ },
6385
+ {
6386
+ "epoch": 2.54,
6387
+ "learning_rate": 0.0002,
6388
+ "loss": 0.323,
6389
+ "step": 6730
6390
+ },
6391
+ {
6392
+ "epoch": 2.54,
6393
+ "learning_rate": 0.0002,
6394
+ "loss": 0.3291,
6395
+ "step": 6740
6396
+ },
6397
+ {
6398
+ "epoch": 2.55,
6399
+ "learning_rate": 0.0002,
6400
+ "loss": 0.2941,
6401
+ "step": 6750
6402
+ },
6403
+ {
6404
+ "epoch": 2.55,
6405
+ "learning_rate": 0.0002,
6406
+ "loss": 0.2855,
6407
+ "step": 6760
6408
+ },
6409
+ {
6410
+ "epoch": 2.55,
6411
+ "learning_rate": 0.0002,
6412
+ "loss": 0.3361,
6413
+ "step": 6770
6414
+ },
6415
+ {
6416
+ "epoch": 2.56,
6417
+ "learning_rate": 0.0002,
6418
+ "loss": 0.2912,
6419
+ "step": 6780
6420
+ },
6421
+ {
6422
+ "epoch": 2.56,
6423
+ "learning_rate": 0.0002,
6424
+ "loss": 0.3228,
6425
+ "step": 6790
6426
+ },
6427
+ {
6428
+ "epoch": 2.57,
6429
+ "learning_rate": 0.0002,
6430
+ "loss": 0.3012,
6431
+ "step": 6800
6432
+ },
6433
+ {
6434
+ "epoch": 2.57,
6435
+ "eval_loss": 0.42342400550842285,
6436
+ "eval_runtime": 103.8163,
6437
+ "eval_samples_per_second": 9.632,
6438
+ "eval_steps_per_second": 4.816,
6439
+ "step": 6800
6440
+ },
6441
+ {
6442
+ "epoch": 2.57,
6443
+ "mmlu_eval_accuracy": 0.5083832679706537,
6444
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
6445
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
6446
+ "mmlu_eval_accuracy_astronomy": 0.375,
6447
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
6448
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
6449
+ "mmlu_eval_accuracy_college_biology": 0.5,
6450
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
6451
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
6452
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6453
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
6454
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6455
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
6456
+ "mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
6457
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6458
+ "mmlu_eval_accuracy_electrical_engineering": 0.4375,
6459
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
6460
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
6461
+ "mmlu_eval_accuracy_global_facts": 0.5,
6462
+ "mmlu_eval_accuracy_high_school_biology": 0.46875,
6463
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
6464
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6465
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
6466
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
6467
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
6468
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
6469
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
6470
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
6471
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
6472
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
6473
+ "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
6474
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
6475
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6476
+ "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
6477
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6478
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6479
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
6480
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6481
+ "mmlu_eval_accuracy_machine_learning": 0.45454545454545453,
6482
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
6483
+ "mmlu_eval_accuracy_marketing": 0.84,
6484
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6485
+ "mmlu_eval_accuracy_miscellaneous": 0.7209302325581395,
6486
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
6487
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
6488
+ "mmlu_eval_accuracy_nutrition": 0.696969696969697,
6489
+ "mmlu_eval_accuracy_philosophy": 0.5882352941176471,
6490
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
6491
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
6492
+ "mmlu_eval_accuracy_professional_law": 0.35294117647058826,
6493
+ "mmlu_eval_accuracy_professional_medicine": 0.4838709677419355,
6494
+ "mmlu_eval_accuracy_professional_psychology": 0.4492753623188406,
6495
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
6496
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
6497
+ "mmlu_eval_accuracy_sociology": 0.7727272727272727,
6498
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
6499
+ "mmlu_eval_accuracy_virology": 0.5,
6500
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
6501
+ "mmlu_loss": 1.7931228834484016,
6502
+ "step": 6800
6503
+ },
6504
+ {
6505
+ "epoch": 2.57,
6506
+ "learning_rate": 0.0002,
6507
+ "loss": 0.2717,
6508
+ "step": 6810
6509
+ },
6510
+ {
6511
+ "epoch": 2.57,
6512
+ "learning_rate": 0.0002,
6513
+ "loss": 0.3164,
6514
+ "step": 6820
6515
+ },
6516
+ {
6517
+ "epoch": 2.58,
6518
+ "learning_rate": 0.0002,
6519
+ "loss": 0.3137,
6520
+ "step": 6830
6521
+ },
6522
+ {
6523
+ "epoch": 2.58,
6524
+ "learning_rate": 0.0002,
6525
+ "loss": 0.2917,
6526
+ "step": 6840
6527
+ },
6528
+ {
6529
+ "epoch": 2.59,
6530
+ "learning_rate": 0.0002,
6531
+ "loss": 0.2988,
6532
+ "step": 6850
6533
+ },
6534
+ {
6535
+ "epoch": 2.59,
6536
+ "learning_rate": 0.0002,
6537
+ "loss": 0.3279,
6538
+ "step": 6860
6539
+ },
6540
+ {
6541
+ "epoch": 2.59,
6542
+ "learning_rate": 0.0002,
6543
+ "loss": 0.3159,
6544
+ "step": 6870
6545
+ },
6546
+ {
6547
+ "epoch": 2.6,
6548
+ "learning_rate": 0.0002,
6549
+ "loss": 0.3194,
6550
+ "step": 6880
6551
+ },
6552
+ {
6553
+ "epoch": 2.6,
6554
+ "learning_rate": 0.0002,
6555
+ "loss": 0.318,
6556
+ "step": 6890
6557
+ },
6558
+ {
6559
+ "epoch": 2.6,
6560
+ "learning_rate": 0.0002,
6561
+ "loss": 0.2968,
6562
+ "step": 6900
6563
+ },
6564
+ {
6565
+ "epoch": 2.61,
6566
+ "learning_rate": 0.0002,
6567
+ "loss": 0.2887,
6568
+ "step": 6910
6569
+ },
6570
+ {
6571
+ "epoch": 2.61,
6572
+ "learning_rate": 0.0002,
6573
+ "loss": 0.31,
6574
+ "step": 6920
6575
+ },
6576
+ {
6577
+ "epoch": 2.62,
6578
+ "learning_rate": 0.0002,
6579
+ "loss": 0.3142,
6580
+ "step": 6930
6581
+ },
6582
+ {
6583
+ "epoch": 2.62,
6584
+ "learning_rate": 0.0002,
6585
+ "loss": 0.332,
6586
+ "step": 6940
6587
+ },
6588
+ {
6589
+ "epoch": 2.62,
6590
+ "learning_rate": 0.0002,
6591
+ "loss": 0.3394,
6592
+ "step": 6950
6593
+ },
6594
+ {
6595
+ "epoch": 2.63,
6596
+ "learning_rate": 0.0002,
6597
+ "loss": 0.315,
6598
+ "step": 6960
6599
+ },
6600
+ {
6601
+ "epoch": 2.63,
6602
+ "learning_rate": 0.0002,
6603
+ "loss": 0.3299,
6604
+ "step": 6970
6605
+ },
6606
+ {
6607
+ "epoch": 2.63,
6608
+ "learning_rate": 0.0002,
6609
+ "loss": 0.2773,
6610
+ "step": 6980
6611
+ },
6612
+ {
6613
+ "epoch": 2.64,
6614
+ "learning_rate": 0.0002,
6615
+ "loss": 0.3352,
6616
+ "step": 6990
6617
+ },
6618
+ {
6619
+ "epoch": 2.64,
6620
+ "learning_rate": 0.0002,
6621
+ "loss": 0.3176,
6622
+ "step": 7000
6623
+ },
6624
+ {
6625
+ "epoch": 2.64,
6626
+ "eval_loss": 0.4198983907699585,
6627
+ "eval_runtime": 103.7738,
6628
+ "eval_samples_per_second": 9.636,
6629
+ "eval_steps_per_second": 4.818,
6630
+ "step": 7000
6631
+ },
6632
+ {
6633
+ "epoch": 2.64,
6634
+ "mmlu_eval_accuracy": 0.5049972739205955,
6635
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
6636
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6637
+ "mmlu_eval_accuracy_astronomy": 0.4375,
6638
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
6639
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
6640
+ "mmlu_eval_accuracy_college_biology": 0.4375,
6641
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
6642
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
6643
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6644
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
6645
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
6646
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
6647
+ "mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
6648
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6649
+ "mmlu_eval_accuracy_electrical_engineering": 0.4375,
6650
+ "mmlu_eval_accuracy_elementary_mathematics": 0.4146341463414634,
6651
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
6652
+ "mmlu_eval_accuracy_global_facts": 0.5,
6653
+ "mmlu_eval_accuracy_high_school_biology": 0.4375,
6654
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
6655
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6656
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
6657
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
6658
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6659
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
6660
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
6661
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5769230769230769,
6662
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
6663
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
6664
+ "mmlu_eval_accuracy_high_school_statistics": 0.5217391304347826,
6665
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
6666
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
6667
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
6668
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6669
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6670
+ "mmlu_eval_accuracy_jurisprudence": 0.5454545454545454,
6671
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
6672
+ "mmlu_eval_accuracy_machine_learning": 0.45454545454545453,
6673
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
6674
+ "mmlu_eval_accuracy_marketing": 0.84,
6675
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6676
+ "mmlu_eval_accuracy_miscellaneous": 0.6976744186046512,
6677
+ "mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
6678
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
6679
+ "mmlu_eval_accuracy_nutrition": 0.696969696969697,
6680
+ "mmlu_eval_accuracy_philosophy": 0.5882352941176471,
6681
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
6682
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
6683
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
6684
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
6685
+ "mmlu_eval_accuracy_professional_psychology": 0.4492753623188406,
6686
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
6687
+ "mmlu_eval_accuracy_security_studies": 0.37037037037037035,
6688
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
6689
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
6690
+ "mmlu_eval_accuracy_virology": 0.5,
6691
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
6692
+ "mmlu_loss": 1.7167998881470752,
6693
+ "step": 7000
6694
+ },
6695
+ {
6696
+ "epoch": 2.65,
6697
+ "learning_rate": 0.0002,
6698
+ "loss": 0.3009,
6699
+ "step": 7010
6700
+ },
6701
+ {
6702
+ "epoch": 2.65,
6703
+ "learning_rate": 0.0002,
6704
+ "loss": 0.3318,
6705
+ "step": 7020
6706
+ },
6707
+ {
6708
+ "epoch": 2.65,
6709
+ "learning_rate": 0.0002,
6710
+ "loss": 0.2991,
6711
+ "step": 7030
6712
+ },
6713
+ {
6714
+ "epoch": 2.66,
6715
+ "learning_rate": 0.0002,
6716
+ "loss": 0.3303,
6717
+ "step": 7040
6718
+ },
6719
+ {
6720
+ "epoch": 2.66,
6721
+ "learning_rate": 0.0002,
6722
+ "loss": 0.3589,
6723
+ "step": 7050
6724
+ },
6725
+ {
6726
+ "epoch": 2.66,
6727
+ "learning_rate": 0.0002,
6728
+ "loss": 0.2999,
6729
+ "step": 7060
6730
+ },
6731
+ {
6732
+ "epoch": 2.67,
6733
+ "learning_rate": 0.0002,
6734
+ "loss": 0.3228,
6735
+ "step": 7070
6736
+ },
6737
+ {
6738
+ "epoch": 2.67,
6739
+ "learning_rate": 0.0002,
6740
+ "loss": 0.3101,
6741
+ "step": 7080
6742
+ },
6743
+ {
6744
+ "epoch": 2.68,
6745
+ "learning_rate": 0.0002,
6746
+ "loss": 0.3345,
6747
+ "step": 7090
6748
+ },
6749
+ {
6750
+ "epoch": 2.68,
6751
+ "learning_rate": 0.0002,
6752
+ "loss": 0.3219,
6753
+ "step": 7100
6754
+ },
6755
+ {
6756
+ "epoch": 2.68,
6757
+ "learning_rate": 0.0002,
6758
+ "loss": 0.2991,
6759
+ "step": 7110
6760
+ },
6761
+ {
6762
+ "epoch": 2.69,
6763
+ "learning_rate": 0.0002,
6764
+ "loss": 0.2836,
6765
+ "step": 7120
6766
+ },
6767
+ {
6768
+ "epoch": 2.69,
6769
+ "learning_rate": 0.0002,
6770
+ "loss": 0.2963,
6771
+ "step": 7130
6772
+ },
6773
+ {
6774
+ "epoch": 2.69,
6775
+ "learning_rate": 0.0002,
6776
+ "loss": 0.3063,
6777
+ "step": 7140
6778
+ },
6779
+ {
6780
+ "epoch": 2.7,
6781
+ "learning_rate": 0.0002,
6782
+ "loss": 0.3197,
6783
+ "step": 7150
6784
+ },
6785
+ {
6786
+ "epoch": 2.7,
6787
+ "learning_rate": 0.0002,
6788
+ "loss": 0.3174,
6789
+ "step": 7160
6790
+ },
6791
+ {
6792
+ "epoch": 2.71,
6793
+ "learning_rate": 0.0002,
6794
+ "loss": 0.3347,
6795
+ "step": 7170
6796
+ },
6797
+ {
6798
+ "epoch": 2.71,
6799
+ "learning_rate": 0.0002,
6800
+ "loss": 0.3377,
6801
+ "step": 7180
6802
+ },
6803
+ {
6804
+ "epoch": 2.71,
6805
+ "learning_rate": 0.0002,
6806
+ "loss": 0.3304,
6807
+ "step": 7190
6808
+ },
6809
+ {
6810
+ "epoch": 2.72,
6811
+ "learning_rate": 0.0002,
6812
+ "loss": 0.2925,
6813
+ "step": 7200
6814
+ },
6815
+ {
6816
+ "epoch": 2.72,
6817
+ "eval_loss": 0.41841480135917664,
6818
+ "eval_runtime": 103.8179,
6819
+ "eval_samples_per_second": 9.632,
6820
+ "eval_steps_per_second": 4.816,
6821
+ "step": 7200
6822
+ },
6823
+ {
6824
+ "epoch": 2.72,
6825
+ "mmlu_eval_accuracy": 0.47795310149751336,
6826
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
6827
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
6828
+ "mmlu_eval_accuracy_astronomy": 0.375,
6829
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
6830
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
6831
+ "mmlu_eval_accuracy_college_biology": 0.375,
6832
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
6833
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
6834
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
6835
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
6836
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
6837
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
6838
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6839
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6840
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
6841
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
6842
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
6843
+ "mmlu_eval_accuracy_global_facts": 0.4,
6844
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
6845
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
6846
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6847
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
6848
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
6849
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
6850
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
6851
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
6852
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
6853
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
6854
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
6855
+ "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
6856
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
6857
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
6858
+ "mmlu_eval_accuracy_human_aging": 0.5652173913043478,
6859
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6860
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6861
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
6862
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6863
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
6864
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
6865
+ "mmlu_eval_accuracy_marketing": 0.84,
6866
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
6867
+ "mmlu_eval_accuracy_miscellaneous": 0.7093023255813954,
6868
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
6869
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
6870
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
6871
+ "mmlu_eval_accuracy_philosophy": 0.5882352941176471,
6872
+ "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
6873
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
6874
+ "mmlu_eval_accuracy_professional_law": 0.35294117647058826,
6875
+ "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
6876
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
6877
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
6878
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
6879
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
6880
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
6881
+ "mmlu_eval_accuracy_virology": 0.4444444444444444,
6882
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
6883
+ "mmlu_loss": 1.6258604400462955,
6884
+ "step": 7200
6885
+ },
6886
+ {
6887
+ "epoch": 2.72,
6888
+ "learning_rate": 0.0002,
6889
+ "loss": 0.2999,
6890
+ "step": 7210
6891
+ },
6892
+ {
6893
+ "epoch": 2.72,
6894
+ "learning_rate": 0.0002,
6895
+ "loss": 0.3067,
6896
+ "step": 7220
6897
+ },
6898
+ {
6899
+ "epoch": 2.73,
6900
+ "learning_rate": 0.0002,
6901
+ "loss": 0.3321,
6902
+ "step": 7230
6903
+ },
6904
+ {
6905
+ "epoch": 2.73,
6906
+ "learning_rate": 0.0002,
6907
+ "loss": 0.3358,
6908
+ "step": 7240
6909
+ },
6910
+ {
6911
+ "epoch": 2.74,
6912
+ "learning_rate": 0.0002,
6913
+ "loss": 0.2871,
6914
+ "step": 7250
6915
+ },
6916
+ {
6917
+ "epoch": 2.74,
6918
+ "learning_rate": 0.0002,
6919
+ "loss": 0.3116,
6920
+ "step": 7260
6921
+ },
6922
+ {
6923
+ "epoch": 2.74,
6924
+ "learning_rate": 0.0002,
6925
+ "loss": 0.2967,
6926
+ "step": 7270
6927
+ },
6928
+ {
6929
+ "epoch": 2.75,
6930
+ "learning_rate": 0.0002,
6931
+ "loss": 0.3098,
6932
+ "step": 7280
6933
+ },
6934
+ {
6935
+ "epoch": 2.75,
6936
+ "learning_rate": 0.0002,
6937
+ "loss": 0.2847,
6938
+ "step": 7290
6939
+ },
6940
+ {
6941
+ "epoch": 2.75,
6942
+ "learning_rate": 0.0002,
6943
+ "loss": 0.2975,
6944
+ "step": 7300
6945
+ },
6946
+ {
6947
+ "epoch": 2.76,
6948
+ "learning_rate": 0.0002,
6949
+ "loss": 0.3012,
6950
+ "step": 7310
6951
+ },
6952
+ {
6953
+ "epoch": 2.76,
6954
+ "learning_rate": 0.0002,
6955
+ "loss": 0.3091,
6956
+ "step": 7320
6957
+ },
6958
+ {
6959
+ "epoch": 2.77,
6960
+ "learning_rate": 0.0002,
6961
+ "loss": 0.3479,
6962
+ "step": 7330
6963
+ },
6964
+ {
6965
+ "epoch": 2.77,
6966
+ "learning_rate": 0.0002,
6967
+ "loss": 0.3102,
6968
+ "step": 7340
6969
+ },
6970
+ {
6971
+ "epoch": 2.77,
6972
+ "learning_rate": 0.0002,
6973
+ "loss": 0.347,
6974
+ "step": 7350
6975
+ },
6976
+ {
6977
+ "epoch": 2.78,
6978
+ "learning_rate": 0.0002,
6979
+ "loss": 0.2887,
6980
+ "step": 7360
6981
+ },
6982
+ {
6983
+ "epoch": 2.78,
6984
+ "learning_rate": 0.0002,
6985
+ "loss": 0.3102,
6986
+ "step": 7370
6987
+ },
6988
+ {
6989
+ "epoch": 2.79,
6990
+ "learning_rate": 0.0002,
6991
+ "loss": 0.2836,
6992
+ "step": 7380
6993
+ },
6994
+ {
6995
+ "epoch": 2.79,
6996
+ "learning_rate": 0.0002,
6997
+ "loss": 0.3318,
6998
+ "step": 7390
6999
+ },
7000
+ {
7001
+ "epoch": 2.79,
7002
+ "learning_rate": 0.0002,
7003
+ "loss": 0.3302,
7004
+ "step": 7400
7005
+ },
7006
+ {
7007
+ "epoch": 2.79,
7008
+ "eval_loss": 0.4154573082923889,
7009
+ "eval_runtime": 103.7239,
7010
+ "eval_samples_per_second": 9.641,
7011
+ "eval_steps_per_second": 4.82,
7012
+ "step": 7400
7013
+ },
7014
+ {
7015
+ "epoch": 2.79,
7016
+ "mmlu_eval_accuracy": 0.5001281347092236,
7017
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
7018
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
7019
+ "mmlu_eval_accuracy_astronomy": 0.375,
7020
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7021
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
7022
+ "mmlu_eval_accuracy_college_biology": 0.375,
7023
+ "mmlu_eval_accuracy_college_chemistry": 0.5,
7024
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7025
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
7026
+ "mmlu_eval_accuracy_college_medicine": 0.5,
7027
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
7028
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
7029
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
7030
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7031
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
7032
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
7033
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
7034
+ "mmlu_eval_accuracy_global_facts": 0.6,
7035
+ "mmlu_eval_accuracy_high_school_biology": 0.5,
7036
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
7037
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7038
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7039
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7040
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
7041
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
7042
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
7043
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5769230769230769,
7044
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
7045
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
7046
+ "mmlu_eval_accuracy_high_school_statistics": 0.4782608695652174,
7047
+ "mmlu_eval_accuracy_high_school_us_history": 0.5454545454545454,
7048
+ "mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
7049
+ "mmlu_eval_accuracy_human_aging": 0.5652173913043478,
7050
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
7051
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7052
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
7053
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7054
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
7055
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7056
+ "mmlu_eval_accuracy_marketing": 0.84,
7057
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
7058
+ "mmlu_eval_accuracy_miscellaneous": 0.7093023255813954,
7059
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
7060
+ "mmlu_eval_accuracy_moral_scenarios": 0.26,
7061
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
7062
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
7063
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
7064
+ "mmlu_eval_accuracy_professional_accounting": 0.22580645161290322,
7065
+ "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
7066
+ "mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
7067
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
7068
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
7069
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7070
+ "mmlu_eval_accuracy_sociology": 0.7727272727272727,
7071
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7072
+ "mmlu_eval_accuracy_virology": 0.5,
7073
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
7074
+ "mmlu_loss": 1.677480318060743,
7075
+ "step": 7400
7076
+ },
7077
+ {
7078
+ "epoch": 2.8,
7079
+ "learning_rate": 0.0002,
7080
+ "loss": 0.3231,
7081
+ "step": 7410
7082
+ },
7083
+ {
7084
+ "epoch": 2.8,
7085
+ "learning_rate": 0.0002,
7086
+ "loss": 0.301,
7087
+ "step": 7420
7088
+ },
7089
+ {
7090
+ "epoch": 2.8,
7091
+ "learning_rate": 0.0002,
7092
+ "loss": 0.3314,
7093
+ "step": 7430
7094
+ },
7095
+ {
7096
+ "epoch": 2.81,
7097
+ "learning_rate": 0.0002,
7098
+ "loss": 0.343,
7099
+ "step": 7440
7100
+ },
7101
+ {
7102
+ "epoch": 2.81,
7103
+ "learning_rate": 0.0002,
7104
+ "loss": 0.2938,
7105
+ "step": 7450
7106
+ },
7107
+ {
7108
+ "epoch": 2.82,
7109
+ "learning_rate": 0.0002,
7110
+ "loss": 0.2858,
7111
+ "step": 7460
7112
+ },
7113
+ {
7114
+ "epoch": 2.82,
7115
+ "learning_rate": 0.0002,
7116
+ "loss": 0.3167,
7117
+ "step": 7470
7118
+ },
7119
+ {
7120
+ "epoch": 2.82,
7121
+ "learning_rate": 0.0002,
7122
+ "loss": 0.289,
7123
+ "step": 7480
7124
+ },
7125
+ {
7126
+ "epoch": 2.83,
7127
+ "learning_rate": 0.0002,
7128
+ "loss": 0.322,
7129
+ "step": 7490
7130
+ },
7131
+ {
7132
+ "epoch": 2.83,
7133
+ "learning_rate": 0.0002,
7134
+ "loss": 0.2898,
7135
+ "step": 7500
7136
+ },
7137
+ {
7138
+ "epoch": 2.83,
7139
+ "learning_rate": 0.0002,
7140
+ "loss": 0.2986,
7141
+ "step": 7510
7142
+ },
7143
+ {
7144
+ "epoch": 2.84,
7145
+ "learning_rate": 0.0002,
7146
+ "loss": 0.3036,
7147
+ "step": 7520
7148
+ },
7149
+ {
7150
+ "epoch": 2.84,
7151
+ "learning_rate": 0.0002,
7152
+ "loss": 0.345,
7153
+ "step": 7530
7154
+ },
7155
+ {
7156
+ "epoch": 2.85,
7157
+ "learning_rate": 0.0002,
7158
+ "loss": 0.3197,
7159
+ "step": 7540
7160
+ },
7161
+ {
7162
+ "epoch": 2.85,
7163
+ "learning_rate": 0.0002,
7164
+ "loss": 0.3223,
7165
+ "step": 7550
7166
+ },
7167
+ {
7168
+ "epoch": 2.85,
7169
+ "learning_rate": 0.0002,
7170
+ "loss": 0.336,
7171
+ "step": 7560
7172
+ },
7173
+ {
7174
+ "epoch": 2.86,
7175
+ "learning_rate": 0.0002,
7176
+ "loss": 0.3247,
7177
+ "step": 7570
7178
+ },
7179
+ {
7180
+ "epoch": 2.86,
7181
+ "learning_rate": 0.0002,
7182
+ "loss": 0.3396,
7183
+ "step": 7580
7184
+ },
7185
+ {
7186
+ "epoch": 2.86,
7187
+ "learning_rate": 0.0002,
7188
+ "loss": 0.3076,
7189
+ "step": 7590
7190
+ },
7191
+ {
7192
+ "epoch": 2.87,
7193
+ "learning_rate": 0.0002,
7194
+ "loss": 0.3146,
7195
+ "step": 7600
7196
+ },
7197
+ {
7198
+ "epoch": 2.87,
7199
+ "eval_loss": 0.4194304347038269,
7200
+ "eval_runtime": 103.7236,
7201
+ "eval_samples_per_second": 9.641,
7202
+ "eval_steps_per_second": 4.821,
7203
+ "step": 7600
7204
+ },
7205
+ {
7206
+ "epoch": 2.87,
7207
+ "mmlu_eval_accuracy": 0.48653590156888393,
7208
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
7209
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
7210
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7211
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7212
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
7213
+ "mmlu_eval_accuracy_college_biology": 0.4375,
7214
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7215
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7216
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
7217
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7218
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
7219
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7220
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
7221
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7222
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
7223
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
7224
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7225
+ "mmlu_eval_accuracy_global_facts": 0.5,
7226
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7227
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
7228
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7229
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
7230
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7231
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
7232
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
7233
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
7234
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
7235
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
7236
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
7237
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
7238
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7239
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7240
+ "mmlu_eval_accuracy_human_aging": 0.5652173913043478,
7241
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
7242
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7243
+ "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
7244
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7245
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
7246
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7247
+ "mmlu_eval_accuracy_marketing": 0.84,
7248
+ "mmlu_eval_accuracy_medical_genetics": 1.0,
7249
+ "mmlu_eval_accuracy_miscellaneous": 0.7209302325581395,
7250
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
7251
+ "mmlu_eval_accuracy_moral_scenarios": 0.22,
7252
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
7253
+ "mmlu_eval_accuracy_philosophy": 0.5588235294117647,
7254
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
7255
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
7256
+ "mmlu_eval_accuracy_professional_law": 0.32941176470588235,
7257
+ "mmlu_eval_accuracy_professional_medicine": 0.4838709677419355,
7258
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
7259
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
7260
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7261
+ "mmlu_eval_accuracy_sociology": 0.7727272727272727,
7262
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7263
+ "mmlu_eval_accuracy_virology": 0.5,
7264
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
7265
+ "mmlu_loss": 1.509614323744886,
7266
+ "step": 7600
7267
+ },
7268
+ {
7269
+ "epoch": 2.87,
7270
+ "learning_rate": 0.0002,
7271
+ "loss": 0.3066,
7272
+ "step": 7610
7273
+ },
7274
+ {
7275
+ "epoch": 2.88,
7276
+ "learning_rate": 0.0002,
7277
+ "loss": 0.3146,
7278
+ "step": 7620
7279
+ },
7280
+ {
7281
+ "epoch": 2.88,
7282
+ "learning_rate": 0.0002,
7283
+ "loss": 0.3398,
7284
+ "step": 7630
7285
+ },
7286
+ {
7287
+ "epoch": 2.88,
7288
+ "learning_rate": 0.0002,
7289
+ "loss": 0.3143,
7290
+ "step": 7640
7291
+ },
7292
+ {
7293
+ "epoch": 2.89,
7294
+ "learning_rate": 0.0002,
7295
+ "loss": 0.3212,
7296
+ "step": 7650
7297
+ },
7298
+ {
7299
+ "epoch": 2.89,
7300
+ "learning_rate": 0.0002,
7301
+ "loss": 2.1283,
7302
+ "step": 7660
7303
+ },
7304
+ {
7305
+ "epoch": 2.89,
7306
+ "learning_rate": 0.0002,
7307
+ "loss": 0.475,
7308
+ "step": 7670
7309
+ },
7310
+ {
7311
+ "epoch": 2.9,
7312
+ "learning_rate": 0.0002,
7313
+ "loss": 0.4166,
7314
+ "step": 7680
7315
+ },
7316
+ {
7317
+ "epoch": 2.9,
7318
+ "learning_rate": 0.0002,
7319
+ "loss": 0.3667,
7320
+ "step": 7690
7321
+ },
7322
+ {
7323
+ "epoch": 2.91,
7324
+ "learning_rate": 0.0002,
7325
+ "loss": 0.2853,
7326
+ "step": 7700
7327
+ },
7328
+ {
7329
+ "epoch": 2.91,
7330
+ "learning_rate": 0.0002,
7331
+ "loss": 0.3093,
7332
+ "step": 7710
7333
+ },
7334
+ {
7335
+ "epoch": 2.91,
7336
+ "learning_rate": 0.0002,
7337
+ "loss": 0.3029,
7338
+ "step": 7720
7339
+ },
7340
+ {
7341
+ "epoch": 2.92,
7342
+ "learning_rate": 0.0002,
7343
+ "loss": 0.3032,
7344
+ "step": 7730
7345
+ },
7346
+ {
7347
+ "epoch": 2.92,
7348
+ "learning_rate": 0.0002,
7349
+ "loss": 0.307,
7350
+ "step": 7740
7351
+ },
7352
+ {
7353
+ "epoch": 2.92,
7354
+ "learning_rate": 0.0002,
7355
+ "loss": 0.306,
7356
+ "step": 7750
7357
+ },
7358
+ {
7359
+ "epoch": 2.93,
7360
+ "learning_rate": 0.0002,
7361
+ "loss": 0.3327,
7362
+ "step": 7760
7363
+ },
7364
+ {
7365
+ "epoch": 2.93,
7366
+ "learning_rate": 0.0002,
7367
+ "loss": 0.3181,
7368
+ "step": 7770
7369
+ },
7370
+ {
7371
+ "epoch": 2.94,
7372
+ "learning_rate": 0.0002,
7373
+ "loss": 0.2981,
7374
+ "step": 7780
7375
+ },
7376
+ {
7377
+ "epoch": 2.94,
7378
+ "learning_rate": 0.0002,
7379
+ "loss": 0.3015,
7380
+ "step": 7790
7381
+ },
7382
+ {
7383
+ "epoch": 2.94,
7384
+ "learning_rate": 0.0002,
7385
+ "loss": 0.2942,
7386
+ "step": 7800
7387
+ },
7388
+ {
7389
+ "epoch": 2.94,
7390
+ "eval_loss": 0.4162120223045349,
7391
+ "eval_runtime": 103.7868,
7392
+ "eval_samples_per_second": 9.635,
7393
+ "eval_steps_per_second": 4.818,
7394
+ "step": 7800
7395
+ },
7396
+ {
7397
+ "epoch": 2.94,
7398
+ "mmlu_eval_accuracy": 0.49443843830722334,
7399
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
7400
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7401
+ "mmlu_eval_accuracy_astronomy": 0.5,
7402
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
7403
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
7404
+ "mmlu_eval_accuracy_college_biology": 0.5625,
7405
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7406
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7407
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7408
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
7409
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
7410
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
7411
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
7412
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7413
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
7414
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
7415
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7416
+ "mmlu_eval_accuracy_global_facts": 0.5,
7417
+ "mmlu_eval_accuracy_high_school_biology": 0.46875,
7418
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
7419
+ "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
7420
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
7421
+ "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
7422
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
7423
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5348837209302325,
7424
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
7425
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
7426
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
7427
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
7428
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
7429
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7430
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7431
+ "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
7432
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7433
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7434
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7435
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7436
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
7437
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
7438
+ "mmlu_eval_accuracy_marketing": 0.84,
7439
+ "mmlu_eval_accuracy_medical_genetics": 1.0,
7440
+ "mmlu_eval_accuracy_miscellaneous": 0.7093023255813954,
7441
+ "mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
7442
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
7443
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
7444
+ "mmlu_eval_accuracy_philosophy": 0.5,
7445
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
7446
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
7447
+ "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
7448
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
7449
+ "mmlu_eval_accuracy_professional_psychology": 0.43478260869565216,
7450
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
7451
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
7452
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7453
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7454
+ "mmlu_eval_accuracy_virology": 0.4444444444444444,
7455
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7456
+ "mmlu_loss": 1.607425860301944,
7457
+ "step": 7800
7458
+ },
7459
+ {
7460
+ "epoch": 2.95,
7461
+ "learning_rate": 0.0002,
7462
+ "loss": 0.3156,
7463
+ "step": 7810
7464
+ },
7465
+ {
7466
+ "epoch": 2.95,
7467
+ "learning_rate": 0.0002,
7468
+ "loss": 0.3059,
7469
+ "step": 7820
7470
+ },
7471
+ {
7472
+ "epoch": 2.95,
7473
+ "learning_rate": 0.0002,
7474
+ "loss": 0.3122,
7475
+ "step": 7830
7476
+ },
7477
+ {
7478
+ "epoch": 2.96,
7479
+ "learning_rate": 0.0002,
7480
+ "loss": 0.2988,
7481
+ "step": 7840
7482
+ },
7483
+ {
7484
+ "epoch": 2.96,
7485
+ "learning_rate": 0.0002,
7486
+ "loss": 0.3727,
7487
+ "step": 7850
7488
+ },
7489
+ {
7490
+ "epoch": 2.97,
7491
+ "learning_rate": 0.0002,
7492
+ "loss": 0.3247,
7493
+ "step": 7860
7494
+ },
7495
+ {
7496
+ "epoch": 2.97,
7497
+ "learning_rate": 0.0002,
7498
+ "loss": 0.3352,
7499
+ "step": 7870
7500
+ },
7501
+ {
7502
+ "epoch": 2.97,
7503
+ "learning_rate": 0.0002,
7504
+ "loss": 0.3353,
7505
+ "step": 7880
7506
+ },
7507
+ {
7508
+ "epoch": 2.98,
7509
+ "learning_rate": 0.0002,
7510
+ "loss": 0.3066,
7511
+ "step": 7890
7512
+ },
7513
+ {
7514
+ "epoch": 2.98,
7515
+ "learning_rate": 0.0002,
7516
+ "loss": 0.3056,
7517
+ "step": 7900
7518
+ },
7519
+ {
7520
+ "epoch": 2.99,
7521
+ "learning_rate": 0.0002,
7522
+ "loss": 0.3363,
7523
+ "step": 7910
7524
+ },
7525
+ {
7526
+ "epoch": 2.99,
7527
+ "learning_rate": 0.0002,
7528
+ "loss": 0.2936,
7529
+ "step": 7920
7530
+ },
7531
+ {
7532
+ "epoch": 2.99,
7533
+ "learning_rate": 0.0002,
7534
+ "loss": 0.2977,
7535
+ "step": 7930
7536
+ },
7537
+ {
7538
+ "epoch": 3.0,
7539
+ "learning_rate": 0.0002,
7540
+ "loss": 0.3366,
7541
+ "step": 7940
7542
+ },
7543
+ {
7544
+ "epoch": 3.0,
7545
+ "learning_rate": 0.0002,
7546
+ "loss": 0.3138,
7547
+ "step": 7950
7548
+ },
7549
+ {
7550
+ "epoch": 3.0,
7551
+ "learning_rate": 0.0002,
7552
+ "loss": 0.2463,
7553
+ "step": 7960
7554
+ },
7555
+ {
7556
+ "epoch": 3.01,
7557
+ "learning_rate": 0.0002,
7558
+ "loss": 0.2455,
7559
+ "step": 7970
7560
+ },
7561
+ {
7562
+ "epoch": 3.01,
7563
+ "learning_rate": 0.0002,
7564
+ "loss": 0.222,
7565
+ "step": 7980
7566
+ },
7567
+ {
7568
+ "epoch": 3.02,
7569
+ "learning_rate": 0.0002,
7570
+ "loss": 0.2584,
7571
+ "step": 7990
7572
+ },
7573
+ {
7574
+ "epoch": 3.02,
7575
+ "learning_rate": 0.0002,
7576
+ "loss": 0.2191,
7577
+ "step": 8000
7578
+ },
7579
+ {
7580
+ "epoch": 3.02,
7581
+ "eval_loss": 0.44157958030700684,
7582
+ "eval_runtime": 103.7343,
7583
+ "eval_samples_per_second": 9.64,
7584
+ "eval_steps_per_second": 4.82,
7585
+ "step": 8000
7586
+ },
7587
+ {
7588
+ "epoch": 3.02,
7589
+ "mmlu_eval_accuracy": 0.5070759338034547,
7590
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
7591
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7592
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7593
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
7594
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
7595
+ "mmlu_eval_accuracy_college_biology": 0.5625,
7596
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7597
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7598
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
7599
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
7600
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
7601
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
7602
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7603
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7604
+ "mmlu_eval_accuracy_electrical_engineering": 0.4375,
7605
+ "mmlu_eval_accuracy_elementary_mathematics": 0.4146341463414634,
7606
+ "mmlu_eval_accuracy_formal_logic": 0.35714285714285715,
7607
+ "mmlu_eval_accuracy_global_facts": 0.5,
7608
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7609
+ "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
7610
+ "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
7611
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
7612
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7613
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
7614
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
7615
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
7616
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5769230769230769,
7617
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
7618
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
7619
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
7620
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7621
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7622
+ "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
7623
+ "mmlu_eval_accuracy_human_sexuality": 0.5,
7624
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7625
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7626
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7627
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
7628
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
7629
+ "mmlu_eval_accuracy_marketing": 0.84,
7630
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7631
+ "mmlu_eval_accuracy_miscellaneous": 0.7093023255813954,
7632
+ "mmlu_eval_accuracy_moral_disputes": 0.5789473684210527,
7633
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
7634
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
7635
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
7636
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
7637
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
7638
+ "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
7639
+ "mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
7640
+ "mmlu_eval_accuracy_professional_psychology": 0.4492753623188406,
7641
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
7642
+ "mmlu_eval_accuracy_security_studies": 0.37037037037037035,
7643
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7644
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7645
+ "mmlu_eval_accuracy_virology": 0.5,
7646
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
7647
+ "mmlu_loss": 1.7326317684768697,
7648
+ "step": 8000
7649
+ },
7650
+ {
7651
+ "epoch": 3.02,
7652
+ "learning_rate": 0.0002,
7653
+ "loss": 0.2559,
7654
+ "step": 8010
7655
+ },
7656
+ {
7657
+ "epoch": 3.03,
7658
+ "learning_rate": 0.0002,
7659
+ "loss": 0.2461,
7660
+ "step": 8020
7661
+ },
7662
+ {
7663
+ "epoch": 3.03,
7664
+ "learning_rate": 0.0002,
7665
+ "loss": 0.2483,
7666
+ "step": 8030
7667
+ },
7668
+ {
7669
+ "epoch": 3.03,
7670
+ "learning_rate": 0.0002,
7671
+ "loss": 0.2793,
7672
+ "step": 8040
7673
+ },
7674
+ {
7675
+ "epoch": 3.04,
7676
+ "learning_rate": 0.0002,
7677
+ "loss": 0.2798,
7678
+ "step": 8050
7679
+ },
7680
+ {
7681
+ "epoch": 3.04,
7682
+ "learning_rate": 0.0002,
7683
+ "loss": 0.2513,
7684
+ "step": 8060
7685
+ },
7686
+ {
7687
+ "epoch": 3.05,
7688
+ "learning_rate": 0.0002,
7689
+ "loss": 0.26,
7690
+ "step": 8070
7691
+ },
7692
+ {
7693
+ "epoch": 3.05,
7694
+ "learning_rate": 0.0002,
7695
+ "loss": 0.2244,
7696
+ "step": 8080
7697
+ },
7698
+ {
7699
+ "epoch": 3.05,
7700
+ "learning_rate": 0.0002,
7701
+ "loss": 0.2245,
7702
+ "step": 8090
7703
+ },
7704
+ {
7705
+ "epoch": 3.06,
7706
+ "learning_rate": 0.0002,
7707
+ "loss": 0.2473,
7708
+ "step": 8100
7709
+ },
7710
+ {
7711
+ "epoch": 3.06,
7712
+ "learning_rate": 0.0002,
7713
+ "loss": 0.2722,
7714
+ "step": 8110
7715
+ },
7716
+ {
7717
+ "epoch": 3.06,
7718
+ "learning_rate": 0.0002,
7719
+ "loss": 0.2332,
7720
+ "step": 8120
7721
+ },
7722
+ {
7723
+ "epoch": 3.07,
7724
+ "learning_rate": 0.0002,
7725
+ "loss": 0.2472,
7726
+ "step": 8130
7727
+ },
7728
+ {
7729
+ "epoch": 3.07,
7730
+ "learning_rate": 0.0002,
7731
+ "loss": 0.2793,
7732
+ "step": 8140
7733
+ },
7734
+ {
7735
+ "epoch": 3.08,
7736
+ "learning_rate": 0.0002,
7737
+ "loss": 0.255,
7738
+ "step": 8150
7739
+ },
7740
+ {
7741
+ "epoch": 3.08,
7742
+ "learning_rate": 0.0002,
7743
+ "loss": 0.233,
7744
+ "step": 8160
7745
+ },
7746
+ {
7747
+ "epoch": 3.08,
7748
+ "learning_rate": 0.0002,
7749
+ "loss": 0.218,
7750
+ "step": 8170
7751
+ },
7752
+ {
7753
+ "epoch": 3.09,
7754
+ "learning_rate": 0.0002,
7755
+ "loss": 0.2363,
7756
+ "step": 8180
7757
+ },
7758
+ {
7759
+ "epoch": 3.09,
7760
+ "learning_rate": 0.0002,
7761
+ "loss": 0.2689,
7762
+ "step": 8190
7763
+ },
7764
+ {
7765
+ "epoch": 3.09,
7766
+ "learning_rate": 0.0002,
7767
+ "loss": 0.2431,
7768
+ "step": 8200
7769
+ },
7770
+ {
7771
+ "epoch": 3.09,
7772
+ "eval_loss": 0.44677451252937317,
7773
+ "eval_runtime": 103.6908,
7774
+ "eval_samples_per_second": 9.644,
7775
+ "eval_steps_per_second": 4.822,
7776
+ "step": 8200
7777
+ },
7778
+ {
7779
+ "epoch": 3.09,
7780
+ "mmlu_eval_accuracy": 0.49684302016607873,
7781
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7782
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
7783
+ "mmlu_eval_accuracy_astronomy": 0.5625,
7784
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
7785
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
7786
+ "mmlu_eval_accuracy_college_biology": 0.5,
7787
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7788
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7789
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7790
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
7791
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
7792
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7793
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7794
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7795
+ "mmlu_eval_accuracy_electrical_engineering": 0.4375,
7796
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
7797
+ "mmlu_eval_accuracy_formal_logic": 0.35714285714285715,
7798
+ "mmlu_eval_accuracy_global_facts": 0.4,
7799
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
7800
+ "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
7801
+ "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
7802
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
7803
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7804
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7805
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
7806
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
7807
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
7808
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
7809
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
7810
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
7811
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7812
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7813
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
7814
+ "mmlu_eval_accuracy_human_sexuality": 0.5,
7815
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7816
+ "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
7817
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7818
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
7819
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
7820
+ "mmlu_eval_accuracy_marketing": 0.84,
7821
+ "mmlu_eval_accuracy_medical_genetics": 1.0,
7822
+ "mmlu_eval_accuracy_miscellaneous": 0.6976744186046512,
7823
+ "mmlu_eval_accuracy_moral_disputes": 0.5789473684210527,
7824
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
7825
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
7826
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
7827
+ "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
7828
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
7829
+ "mmlu_eval_accuracy_professional_law": 0.35294117647058826,
7830
+ "mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
7831
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
7832
+ "mmlu_eval_accuracy_public_relations": 0.5,
7833
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7834
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
7835
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7836
+ "mmlu_eval_accuracy_virology": 0.5,
7837
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7838
+ "mmlu_loss": 1.6755223667185861,
7839
+ "step": 8200
7840
+ },
7841
+ {
7842
+ "epoch": 3.1,
7843
+ "learning_rate": 0.0002,
7844
+ "loss": 0.256,
7845
+ "step": 8210
7846
+ },
7847
+ {
7848
+ "epoch": 3.1,
7849
+ "learning_rate": 0.0002,
7850
+ "loss": 0.2537,
7851
+ "step": 8220
7852
+ },
7853
+ {
7854
+ "epoch": 3.11,
7855
+ "learning_rate": 0.0002,
7856
+ "loss": 0.2487,
7857
+ "step": 8230
7858
+ },
7859
+ {
7860
+ "epoch": 3.11,
7861
+ "learning_rate": 0.0002,
7862
+ "loss": 0.2516,
7863
+ "step": 8240
7864
+ },
7865
+ {
7866
+ "epoch": 3.11,
7867
+ "learning_rate": 0.0002,
7868
+ "loss": 0.2536,
7869
+ "step": 8250
7870
+ },
7871
+ {
7872
+ "epoch": 3.12,
7873
+ "learning_rate": 0.0002,
7874
+ "loss": 0.2443,
7875
+ "step": 8260
7876
+ },
7877
+ {
7878
+ "epoch": 3.12,
7879
+ "learning_rate": 0.0002,
7880
+ "loss": 0.2416,
7881
+ "step": 8270
7882
+ },
7883
+ {
7884
+ "epoch": 3.12,
7885
+ "learning_rate": 0.0002,
7886
+ "loss": 0.2233,
7887
+ "step": 8280
7888
+ },
7889
+ {
7890
+ "epoch": 3.13,
7891
+ "learning_rate": 0.0002,
7892
+ "loss": 0.2477,
7893
+ "step": 8290
7894
+ },
7895
+ {
7896
+ "epoch": 3.13,
7897
+ "learning_rate": 0.0002,
7898
+ "loss": 0.2589,
7899
+ "step": 8300
7900
+ },
7901
+ {
7902
+ "epoch": 3.14,
7903
+ "learning_rate": 0.0002,
7904
+ "loss": 0.2895,
7905
+ "step": 8310
7906
+ },
7907
+ {
7908
+ "epoch": 3.14,
7909
+ "learning_rate": 0.0002,
7910
+ "loss": 0.2549,
7911
+ "step": 8320
7912
+ },
7913
+ {
7914
+ "epoch": 3.14,
7915
+ "learning_rate": 0.0002,
7916
+ "loss": 0.2598,
7917
+ "step": 8330
7918
+ },
7919
+ {
7920
+ "epoch": 3.15,
7921
+ "learning_rate": 0.0002,
7922
+ "loss": 0.2644,
7923
+ "step": 8340
7924
+ },
7925
+ {
7926
+ "epoch": 3.15,
7927
+ "learning_rate": 0.0002,
7928
+ "loss": 0.2634,
7929
+ "step": 8350
7930
+ },
7931
+ {
7932
+ "epoch": 3.16,
7933
+ "learning_rate": 0.0002,
7934
+ "loss": 0.2791,
7935
+ "step": 8360
7936
+ },
7937
+ {
7938
+ "epoch": 3.16,
7939
+ "learning_rate": 0.0002,
7940
+ "loss": 0.268,
7941
+ "step": 8370
7942
+ },
7943
+ {
7944
+ "epoch": 3.16,
7945
+ "learning_rate": 0.0002,
7946
+ "loss": 0.2445,
7947
+ "step": 8380
7948
+ },
7949
+ {
7950
+ "epoch": 3.17,
7951
+ "learning_rate": 0.0002,
7952
+ "loss": 0.244,
7953
+ "step": 8390
7954
+ },
7955
+ {
7956
+ "epoch": 3.17,
7957
+ "learning_rate": 0.0002,
7958
+ "loss": 0.2543,
7959
+ "step": 8400
7960
+ },
7961
+ {
7962
+ "epoch": 3.17,
7963
+ "eval_loss": 0.44773170351982117,
7964
+ "eval_runtime": 103.8011,
7965
+ "eval_samples_per_second": 9.634,
7966
+ "eval_steps_per_second": 4.817,
7967
+ "step": 8400
7968
+ },
7969
+ {
7970
+ "epoch": 3.17,
7971
+ "mmlu_eval_accuracy": 0.5005608847321679,
7972
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7973
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
7974
+ "mmlu_eval_accuracy_astronomy": 0.375,
7975
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
7976
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5862068965517241,
7977
+ "mmlu_eval_accuracy_college_biology": 0.5,
7978
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7979
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7980
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
7981
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7982
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
7983
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7984
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7985
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7986
+ "mmlu_eval_accuracy_electrical_engineering": 0.4375,
7987
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
7988
+ "mmlu_eval_accuracy_formal_logic": 0.42857142857142855,
7989
+ "mmlu_eval_accuracy_global_facts": 0.5,
7990
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7991
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
7992
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7993
+ "mmlu_eval_accuracy_high_school_european_history": 0.7222222222222222,
7994
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7995
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7996
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
7997
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
7998
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
7999
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
8000
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
8001
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
8002
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
8003
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
8004
+ "mmlu_eval_accuracy_human_aging": 0.5652173913043478,
8005
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
8006
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
8007
+ "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
8008
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
8009
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
8010
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
8011
+ "mmlu_eval_accuracy_marketing": 0.84,
8012
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
8013
+ "mmlu_eval_accuracy_miscellaneous": 0.7093023255813954,
8014
+ "mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
8015
+ "mmlu_eval_accuracy_moral_scenarios": 0.26,
8016
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
8017
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
8018
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
8019
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
8020
+ "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
8021
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
8022
+ "mmlu_eval_accuracy_professional_psychology": 0.4492753623188406,
8023
+ "mmlu_eval_accuracy_public_relations": 0.5,
8024
+ "mmlu_eval_accuracy_security_studies": 0.37037037037037035,
8025
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
8026
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
8027
+ "mmlu_eval_accuracy_virology": 0.4444444444444444,
8028
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
8029
+ "mmlu_loss": 1.913919418155369,
8030
+ "step": 8400
8031
+ },
8032
+ {
8033
+ "epoch": 3.17,
8034
+ "learning_rate": 0.0002,
8035
+ "loss": 0.2659,
8036
+ "step": 8410
8037
+ },
8038
+ {
8039
+ "epoch": 3.18,
8040
+ "learning_rate": 0.0002,
8041
+ "loss": 0.2569,
8042
+ "step": 8420
8043
+ },
8044
+ {
8045
+ "epoch": 3.18,
8046
+ "learning_rate": 0.0002,
8047
+ "loss": 0.2549,
8048
+ "step": 8430
8049
+ },
8050
+ {
8051
+ "epoch": 3.19,
8052
+ "learning_rate": 0.0002,
8053
+ "loss": 0.2692,
8054
+ "step": 8440
8055
+ },
8056
+ {
8057
+ "epoch": 3.19,
8058
+ "learning_rate": 0.0002,
8059
+ "loss": 0.253,
8060
+ "step": 8450
8061
+ },
8062
+ {
8063
+ "epoch": 3.19,
8064
+ "learning_rate": 0.0002,
8065
+ "loss": 0.2554,
8066
+ "step": 8460
8067
+ },
8068
+ {
8069
+ "epoch": 3.2,
8070
+ "learning_rate": 0.0002,
8071
+ "loss": 0.2434,
8072
+ "step": 8470
8073
+ },
8074
+ {
8075
+ "epoch": 3.2,
8076
+ "learning_rate": 0.0002,
8077
+ "loss": 0.2782,
8078
+ "step": 8480
8079
+ },
8080
+ {
8081
+ "epoch": 3.2,
8082
+ "learning_rate": 0.0002,
8083
+ "loss": 0.248,
8084
+ "step": 8490
8085
+ },
8086
+ {
8087
+ "epoch": 3.21,
8088
+ "learning_rate": 0.0002,
8089
+ "loss": 0.2421,
8090
+ "step": 8500
8091
+ },
8092
+ {
8093
+ "epoch": 3.21,
8094
+ "learning_rate": 0.0002,
8095
+ "loss": 0.2713,
8096
+ "step": 8510
8097
+ },
8098
+ {
8099
+ "epoch": 3.22,
8100
+ "learning_rate": 0.0002,
8101
+ "loss": 0.302,
8102
+ "step": 8520
8103
+ },
8104
+ {
8105
+ "epoch": 3.22,
8106
+ "learning_rate": 0.0002,
8107
+ "loss": 0.2296,
8108
+ "step": 8530
8109
+ },
8110
+ {
8111
+ "epoch": 3.22,
8112
+ "learning_rate": 0.0002,
8113
+ "loss": 0.2504,
8114
+ "step": 8540
8115
+ },
8116
+ {
8117
+ "epoch": 3.23,
8118
+ "learning_rate": 0.0002,
8119
+ "loss": 0.2448,
8120
+ "step": 8550
8121
+ },
8122
+ {
8123
+ "epoch": 3.23,
8124
+ "learning_rate": 0.0002,
8125
+ "loss": 0.2559,
8126
+ "step": 8560
8127
+ },
8128
+ {
8129
+ "epoch": 3.23,
8130
+ "learning_rate": 0.0002,
8131
+ "loss": 0.2477,
8132
+ "step": 8570
8133
+ },
8134
+ {
8135
+ "epoch": 3.24,
8136
+ "learning_rate": 0.0002,
8137
+ "loss": 0.2633,
8138
+ "step": 8580
8139
+ },
8140
+ {
8141
+ "epoch": 3.24,
8142
+ "learning_rate": 0.0002,
8143
+ "loss": 0.2511,
8144
+ "step": 8590
8145
+ },
8146
+ {
8147
+ "epoch": 3.25,
8148
+ "learning_rate": 0.0002,
8149
+ "loss": 0.2375,
8150
+ "step": 8600
8151
+ },
8152
+ {
8153
+ "epoch": 3.25,
8154
+ "eval_loss": 0.4444422721862793,
8155
+ "eval_runtime": 103.8053,
8156
+ "eval_samples_per_second": 9.633,
8157
+ "eval_steps_per_second": 4.817,
8158
+ "step": 8600
8159
+ },
8160
+ {
8161
+ "epoch": 3.25,
8162
+ "mmlu_eval_accuracy": 0.49446869409691946,
8163
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
8164
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
8165
+ "mmlu_eval_accuracy_astronomy": 0.375,
8166
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
8167
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
8168
+ "mmlu_eval_accuracy_college_biology": 0.5,
8169
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
8170
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
8171
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
8172
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
8173
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
8174
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
8175
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
8176
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
8177
+ "mmlu_eval_accuracy_electrical_engineering": 0.4375,
8178
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
8179
+ "mmlu_eval_accuracy_formal_logic": 0.5,
8180
+ "mmlu_eval_accuracy_global_facts": 0.5,
8181
+ "mmlu_eval_accuracy_high_school_biology": 0.4375,
8182
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
8183
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
8184
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
8185
+ "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
8186
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
8187
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
8188
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
8189
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
8190
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
8191
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
8192
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
8193
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
8194
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
8195
+ "mmlu_eval_accuracy_human_aging": 0.5652173913043478,
8196
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
8197
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
8198
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
8199
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
8200
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
8201
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
8202
+ "mmlu_eval_accuracy_marketing": 0.84,
8203
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
8204
+ "mmlu_eval_accuracy_miscellaneous": 0.7209302325581395,
8205
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
8206
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
8207
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
8208
+ "mmlu_eval_accuracy_philosophy": 0.5882352941176471,
8209
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
8210
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
8211
+ "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
8212
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
8213
+ "mmlu_eval_accuracy_professional_psychology": 0.4492753623188406,
8214
+ "mmlu_eval_accuracy_public_relations": 0.5,
8215
+ "mmlu_eval_accuracy_security_studies": 0.37037037037037035,
8216
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
8217
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
8218
+ "mmlu_eval_accuracy_virology": 0.5,
8219
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
8220
+ "mmlu_loss": 1.8425939399408608,
8221
+ "step": 8600
8222
  }
8223
  ],
8224
  "max_steps": 10000,
8225
  "num_train_epochs": 4,
8226
+ "total_flos": 6.14125213221077e+17,
8227
  "trial_name": null,
8228
  "trial_params": null
8229
  }
{checkpoint-6600 → checkpoint-8600}/training_args.bin RENAMED
File without changes