Farouk committed
Commit 8a66cd6 · 1 Parent(s): 0e6bed7

Training in progress, step 8200

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fef3653dd0787a9908ba488be34aec736b7912b797d7047762361701966e6b9e
+oid sha256:da9c2e804ceb56d688da870c2ee97b2dc49e2e662041baea6a6e9d6905b86daf
 size 319977229
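Only the Git LFS pointer file changes here, not the serialized weights themselves: a pointer records the spec version, the SHA-256 of the tracked blob, and its byte size, so each new checkpoint surfaces as a one-line oid change. A minimal sketch of reading such a pointer back, assuming only the three-field format shown above (the path is a placeholder):

# Minimal sketch: parse a Git LFS pointer file into its three fields.
# The path is a placeholder for any LFS-tracked file in this repo.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("adapter_model.bin")
print(ptr["oid"])   # e.g. sha256:da9c2e80...
print(ptr["size"])  # e.g. 319977229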
checkpoint-4000/adapter_model/adapter_model/README.md CHANGED
@@ -213,6 +213,17 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_use_double_quant: True
 - bnb_4bit_compute_dtype: bfloat16
 
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - load_in_8bit: False
 - load_in_4bit: True
@@ -244,5 +255,6 @@ The following `bitsandbytes` quantization config was used during training:
 - PEFT 0.4.0
 - PEFT 0.4.0
 - PEFT 0.4.0
+- PEFT 0.4.0
 
 - PEFT 0.4.0
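For reference, the quantization block added to the README maps field-for-field onto `transformers.BitsAndBytesConfig`; a sketch of reconstructing it (an illustration built from the listed fields, not code taken from this repo):

# Sketch: the bitsandbytes quantization config listed above, expressed
# as a transformers BitsAndBytesConfig with the same field values.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)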
checkpoint-4000/adapter_model/adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:498a224ba7636eecb11558905701b32ae45e4c24ff0179788571d74b4a62f865
+oid sha256:fef3653dd0787a9908ba488be34aec736b7912b797d7047762361701966e6b9e
 size 319977229
{checkpoint-6200 → checkpoint-8200}/README.md RENAMED
File without changes
{checkpoint-6200 → checkpoint-8200}/adapter_config.json RENAMED
File without changes
{checkpoint-6200 → checkpoint-8200}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b46dfbc97b26476d5cc53984e35d2e29d4f127428f50303827f2d18321bd98c7
+oid sha256:da9c2e804ceb56d688da870c2ee97b2dc49e2e662041baea6a6e9d6905b86daf
 size 319977229
{checkpoint-6200 → checkpoint-8200}/added_tokens.json RENAMED
File without changes
{checkpoint-6200 → checkpoint-8200}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a0773fe4117707e0ae2c5b23415323a7f2b7162119103a5e880819e8e7ea8f77
+oid sha256:ccfad99a31fcc5c731dc76fd7ca093f20e99ae9c6e3ce4401fb058183fe80114
 size 1279539973
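The optimizer state is roughly four times the adapter (1,279,539,973 vs 319,977,229 bytes), which would be consistent with an Adam-style optimizer holding two fp32 moment buffers (8 bytes per trainable parameter) against 16-bit adapter weights (2 bytes per parameter); this ratio is an inference from the sizes above, not something the repo states:

# Quick check of the size ratio shown in the diff above. This is an
# inference about the optimizer layout, not a fact stated in the repo.
print(1279539973 / 319977229)  # ~4.0, matching 8 bytes of optimizer
                               # state per 2 bytes of adapter weight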
{checkpoint-6200 β†’ checkpoint-8200}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ad84a667992a8ba1d3d2486c993cc7084110e1996db092f8e4674053b0643fa9
+oid sha256:d2ddc98b34e53adf3b7abd6fce991eda9b93ede1b0277cdc9fa8ca3221c8dff4
 size 14511
{checkpoint-6200 → checkpoint-8200}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:00a2cdb3e0cba9c4eb9fe8ce4b971cf52e18a108de1c45e1a56c83cbbb819ccc
+oid sha256:613102d8c8b309b4aec1d07a4f439649e72414ddd8dba3c06a125a1039277b82
 size 627
{checkpoint-6200 → checkpoint-8200}/special_tokens_map.json RENAMED
File without changes
{checkpoint-6200 → checkpoint-8200}/tokenizer.model RENAMED
File without changes
{checkpoint-6200 → checkpoint-8200}/tokenizer_config.json RENAMED
File without changes
{checkpoint-6200 → checkpoint-8200}/trainer_state.json RENAMED
@@ -1,8 +1,8 @@
 {
 "best_metric": 0.758108913898468,
 "best_model_checkpoint": "experts/expert-5/checkpoint-4000",
-"epoch": 1.500484027105518,
-"global_step": 6200,
+"epoch": 1.9845111326234268,
+"global_step": 8200,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -5927,11 +5927,1921 @@
 "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
 "mmlu_loss": 1.1923443862959238,
 "step": 6200
 }
 ],
 "max_steps": 10000,
 "num_train_epochs": 3,
-"total_flos": 6.866328733497754e+17,
 "trial_name": null,
 "trial_params": null
 }
5930
+ },
5931
+ {
5932
+ "epoch": 1.5,
5933
+ "learning_rate": 0.0002,
5934
+ "loss": 0.6624,
5935
+ "step": 6210
5936
+ },
5937
+ {
5938
+ "epoch": 1.51,
5939
+ "learning_rate": 0.0002,
5940
+ "loss": 0.6457,
5941
+ "step": 6220
5942
+ },
5943
+ {
5944
+ "epoch": 1.51,
5945
+ "learning_rate": 0.0002,
5946
+ "loss": 0.6656,
5947
+ "step": 6230
5948
+ },
5949
+ {
5950
+ "epoch": 1.51,
5951
+ "learning_rate": 0.0002,
5952
+ "loss": 0.675,
5953
+ "step": 6240
5954
+ },
5955
+ {
5956
+ "epoch": 1.51,
5957
+ "learning_rate": 0.0002,
5958
+ "loss": 0.6694,
5959
+ "step": 6250
5960
+ },
5961
+ {
5962
+ "epoch": 1.52,
5963
+ "learning_rate": 0.0002,
5964
+ "loss": 0.6347,
5965
+ "step": 6260
5966
+ },
5967
+ {
5968
+ "epoch": 1.52,
5969
+ "learning_rate": 0.0002,
5970
+ "loss": 0.6442,
5971
+ "step": 6270
5972
+ },
5973
+ {
5974
+ "epoch": 1.52,
5975
+ "learning_rate": 0.0002,
5976
+ "loss": 0.667,
5977
+ "step": 6280
5978
+ },
5979
+ {
5980
+ "epoch": 1.52,
5981
+ "learning_rate": 0.0002,
5982
+ "loss": 0.7039,
5983
+ "step": 6290
5984
+ },
5985
+ {
5986
+ "epoch": 1.52,
5987
+ "learning_rate": 0.0002,
5988
+ "loss": 0.7089,
5989
+ "step": 6300
5990
+ },
5991
+ {
5992
+ "epoch": 1.53,
5993
+ "learning_rate": 0.0002,
5994
+ "loss": 0.6863,
5995
+ "step": 6310
5996
+ },
5997
+ {
5998
+ "epoch": 1.53,
5999
+ "learning_rate": 0.0002,
6000
+ "loss": 0.7129,
6001
+ "step": 6320
6002
+ },
6003
+ {
6004
+ "epoch": 1.53,
6005
+ "learning_rate": 0.0002,
6006
+ "loss": 0.6318,
6007
+ "step": 6330
6008
+ },
6009
+ {
6010
+ "epoch": 1.53,
6011
+ "learning_rate": 0.0002,
6012
+ "loss": 0.7183,
6013
+ "step": 6340
6014
+ },
6015
+ {
6016
+ "epoch": 1.54,
6017
+ "learning_rate": 0.0002,
6018
+ "loss": 0.6498,
6019
+ "step": 6350
6020
+ },
6021
+ {
6022
+ "epoch": 1.54,
6023
+ "learning_rate": 0.0002,
6024
+ "loss": 0.7159,
6025
+ "step": 6360
6026
+ },
6027
+ {
6028
+ "epoch": 1.54,
6029
+ "learning_rate": 0.0002,
6030
+ "loss": 0.682,
6031
+ "step": 6370
6032
+ },
6033
+ {
6034
+ "epoch": 1.54,
6035
+ "learning_rate": 0.0002,
6036
+ "loss": 0.7131,
6037
+ "step": 6380
6038
+ },
6039
+ {
6040
+ "epoch": 1.55,
6041
+ "learning_rate": 0.0002,
6042
+ "loss": 0.6699,
6043
+ "step": 6390
6044
+ },
6045
+ {
6046
+ "epoch": 1.55,
6047
+ "learning_rate": 0.0002,
6048
+ "loss": 0.6652,
6049
+ "step": 6400
6050
+ },
6051
+ {
6052
+ "epoch": 1.55,
6053
+ "eval_loss": 0.7618897557258606,
6054
+ "eval_runtime": 146.8533,
6055
+ "eval_samples_per_second": 6.81,
6056
+ "eval_steps_per_second": 3.405,
6057
+ "step": 6400
6058
+ },
6059
+ {
6060
+ "epoch": 1.55,
6061
+ "mmlu_eval_accuracy": 0.49447073883624965,
6062
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
6063
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
6064
+ "mmlu_eval_accuracy_astronomy": 0.375,
6065
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6066
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5862068965517241,
6067
+ "mmlu_eval_accuracy_college_biology": 0.4375,
6068
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
6069
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6070
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6071
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
6072
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
6073
+ "mmlu_eval_accuracy_computer_security": 0.7272727272727273,
6074
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6075
+ "mmlu_eval_accuracy_econometrics": 0.25,
6076
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6077
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
6078
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
6079
+ "mmlu_eval_accuracy_global_facts": 0.6,
6080
+ "mmlu_eval_accuracy_high_school_biology": 0.46875,
6081
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
6082
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6083
+ "mmlu_eval_accuracy_high_school_european_history": 0.5,
6084
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
6085
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
6086
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
6087
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
6088
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
6089
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
6090
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
6091
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
6092
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
6093
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6094
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
6095
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6096
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6097
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
6098
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
6099
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
6100
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
6101
+ "mmlu_eval_accuracy_marketing": 0.76,
6102
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6103
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
6104
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
6105
+ "mmlu_eval_accuracy_moral_scenarios": 0.22,
6106
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
6107
+ "mmlu_eval_accuracy_philosophy": 0.5,
6108
+ "mmlu_eval_accuracy_prehistory": 0.6285714285714286,
6109
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
6110
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
6111
+ "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
6112
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
6113
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
6114
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
6115
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
6116
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6117
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
6118
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
6119
+ "mmlu_loss": 1.1299385156472732,
6120
+ "step": 6400
6121
+ },
6122
+ {
6123
+ "epoch": 1.55,
6124
+ "learning_rate": 0.0002,
6125
+ "loss": 0.7423,
6126
+ "step": 6410
6127
+ },
6128
+ {
6129
+ "epoch": 1.55,
6130
+ "learning_rate": 0.0002,
6131
+ "loss": 0.6713,
6132
+ "step": 6420
6133
+ },
6134
+ {
6135
+ "epoch": 1.56,
6136
+ "learning_rate": 0.0002,
6137
+ "loss": 0.6798,
6138
+ "step": 6430
6139
+ },
6140
+ {
6141
+ "epoch": 1.56,
6142
+ "learning_rate": 0.0002,
6143
+ "loss": 0.648,
6144
+ "step": 6440
6145
+ },
6146
+ {
6147
+ "epoch": 1.56,
6148
+ "learning_rate": 0.0002,
6149
+ "loss": 0.6531,
6150
+ "step": 6450
6151
+ },
6152
+ {
6153
+ "epoch": 1.56,
6154
+ "learning_rate": 0.0002,
6155
+ "loss": 0.6473,
6156
+ "step": 6460
6157
+ },
6158
+ {
6159
+ "epoch": 1.57,
6160
+ "learning_rate": 0.0002,
6161
+ "loss": 0.6463,
6162
+ "step": 6470
6163
+ },
6164
+ {
6165
+ "epoch": 1.57,
6166
+ "learning_rate": 0.0002,
6167
+ "loss": 0.6275,
6168
+ "step": 6480
6169
+ },
6170
+ {
6171
+ "epoch": 1.57,
6172
+ "learning_rate": 0.0002,
6173
+ "loss": 0.7251,
6174
+ "step": 6490
6175
+ },
6176
+ {
6177
+ "epoch": 1.57,
6178
+ "learning_rate": 0.0002,
6179
+ "loss": 0.6544,
6180
+ "step": 6500
6181
+ },
6182
+ {
6183
+ "epoch": 1.58,
6184
+ "learning_rate": 0.0002,
6185
+ "loss": 0.6497,
6186
+ "step": 6510
6187
+ },
6188
+ {
6189
+ "epoch": 1.58,
6190
+ "learning_rate": 0.0002,
6191
+ "loss": 0.729,
6192
+ "step": 6520
6193
+ },
6194
+ {
6195
+ "epoch": 1.58,
6196
+ "learning_rate": 0.0002,
6197
+ "loss": 0.6562,
6198
+ "step": 6530
6199
+ },
6200
+ {
6201
+ "epoch": 1.58,
6202
+ "learning_rate": 0.0002,
6203
+ "loss": 0.6471,
6204
+ "step": 6540
6205
+ },
6206
+ {
6207
+ "epoch": 1.59,
6208
+ "learning_rate": 0.0002,
6209
+ "loss": 0.6597,
6210
+ "step": 6550
6211
+ },
6212
+ {
6213
+ "epoch": 1.59,
6214
+ "learning_rate": 0.0002,
6215
+ "loss": 0.5999,
6216
+ "step": 6560
6217
+ },
6218
+ {
6219
+ "epoch": 1.59,
6220
+ "learning_rate": 0.0002,
6221
+ "loss": 0.6925,
6222
+ "step": 6570
6223
+ },
6224
+ {
6225
+ "epoch": 1.59,
6226
+ "learning_rate": 0.0002,
6227
+ "loss": 0.6591,
6228
+ "step": 6580
6229
+ },
6230
+ {
6231
+ "epoch": 1.59,
6232
+ "learning_rate": 0.0002,
6233
+ "loss": 0.6821,
6234
+ "step": 6590
6235
+ },
6236
+ {
6237
+ "epoch": 1.6,
6238
+ "learning_rate": 0.0002,
6239
+ "loss": 0.6571,
6240
+ "step": 6600
6241
+ },
6242
+ {
6243
+ "epoch": 1.6,
6244
+ "eval_loss": 0.7628704905509949,
6245
+ "eval_runtime": 146.8958,
6246
+ "eval_samples_per_second": 6.808,
6247
+ "eval_steps_per_second": 3.404,
6248
+ "step": 6600
6249
+ },
6250
+ {
6251
+ "epoch": 1.6,
6252
+ "mmlu_eval_accuracy": 0.49208846366982484,
6253
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
6254
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
6255
+ "mmlu_eval_accuracy_astronomy": 0.375,
6256
+ "mmlu_eval_accuracy_business_ethics": 0.36363636363636365,
6257
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5862068965517241,
6258
+ "mmlu_eval_accuracy_college_biology": 0.5,
6259
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
6260
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6261
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6262
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
6263
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6264
+ "mmlu_eval_accuracy_computer_security": 0.7272727272727273,
6265
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6266
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6267
+ "mmlu_eval_accuracy_electrical_engineering": 0.1875,
6268
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
6269
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
6270
+ "mmlu_eval_accuracy_global_facts": 0.6,
6271
+ "mmlu_eval_accuracy_high_school_biology": 0.5,
6272
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
6273
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6274
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
6275
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
6276
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
6277
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
6278
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
6279
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.6153846153846154,
6280
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
6281
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
6282
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
6283
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
6284
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6285
+ "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
6286
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6287
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6288
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
6289
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
6290
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
6291
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
6292
+ "mmlu_eval_accuracy_marketing": 0.76,
6293
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
6294
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
6295
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
6296
+ "mmlu_eval_accuracy_moral_scenarios": 0.22,
6297
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
6298
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
6299
+ "mmlu_eval_accuracy_prehistory": 0.6,
6300
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
6301
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
6302
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
6303
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
6304
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
6305
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
6306
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
6307
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6308
+ "mmlu_eval_accuracy_virology": 0.5,
6309
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6310
+ "mmlu_loss": 1.1676515557513847,
6311
+ "step": 6600
6312
+ },
6313
+ {
6314
+ "epoch": 1.6,
6315
+ "learning_rate": 0.0002,
6316
+ "loss": 0.7397,
6317
+ "step": 6610
6318
+ },
6319
+ {
6320
+ "epoch": 1.6,
6321
+ "learning_rate": 0.0002,
6322
+ "loss": 0.6331,
6323
+ "step": 6620
6324
+ },
6325
+ {
6326
+ "epoch": 1.6,
6327
+ "learning_rate": 0.0002,
6328
+ "loss": 0.7298,
6329
+ "step": 6630
6330
+ },
6331
+ {
6332
+ "epoch": 1.61,
6333
+ "learning_rate": 0.0002,
6334
+ "loss": 0.6663,
6335
+ "step": 6640
6336
+ },
6337
+ {
6338
+ "epoch": 1.61,
6339
+ "learning_rate": 0.0002,
6340
+ "loss": 0.6981,
6341
+ "step": 6650
6342
+ },
6343
+ {
6344
+ "epoch": 1.61,
6345
+ "learning_rate": 0.0002,
6346
+ "loss": 0.6436,
6347
+ "step": 6660
6348
+ },
6349
+ {
6350
+ "epoch": 1.61,
6351
+ "learning_rate": 0.0002,
6352
+ "loss": 0.6787,
6353
+ "step": 6670
6354
+ },
6355
+ {
6356
+ "epoch": 1.62,
6357
+ "learning_rate": 0.0002,
6358
+ "loss": 0.6776,
6359
+ "step": 6680
6360
+ },
6361
+ {
6362
+ "epoch": 1.62,
6363
+ "learning_rate": 0.0002,
6364
+ "loss": 0.6156,
6365
+ "step": 6690
6366
+ },
6367
+ {
6368
+ "epoch": 1.62,
6369
+ "learning_rate": 0.0002,
6370
+ "loss": 0.7107,
6371
+ "step": 6700
6372
+ },
6373
+ {
6374
+ "epoch": 1.62,
6375
+ "learning_rate": 0.0002,
6376
+ "loss": 0.6686,
6377
+ "step": 6710
6378
+ },
6379
+ {
6380
+ "epoch": 1.63,
6381
+ "learning_rate": 0.0002,
6382
+ "loss": 0.7684,
6383
+ "step": 6720
6384
+ },
6385
+ {
6386
+ "epoch": 1.63,
6387
+ "learning_rate": 0.0002,
6388
+ "loss": 0.7053,
6389
+ "step": 6730
6390
+ },
6391
+ {
6392
+ "epoch": 1.63,
6393
+ "learning_rate": 0.0002,
6394
+ "loss": 0.6801,
6395
+ "step": 6740
6396
+ },
6397
+ {
6398
+ "epoch": 1.63,
6399
+ "learning_rate": 0.0002,
6400
+ "loss": 0.6163,
6401
+ "step": 6750
6402
+ },
6403
+ {
6404
+ "epoch": 1.64,
6405
+ "learning_rate": 0.0002,
6406
+ "loss": 0.675,
6407
+ "step": 6760
6408
+ },
6409
+ {
6410
+ "epoch": 1.64,
6411
+ "learning_rate": 0.0002,
6412
+ "loss": 0.6691,
6413
+ "step": 6770
6414
+ },
6415
+ {
6416
+ "epoch": 1.64,
6417
+ "learning_rate": 0.0002,
6418
+ "loss": 0.6887,
6419
+ "step": 6780
6420
+ },
6421
+ {
6422
+ "epoch": 1.64,
6423
+ "learning_rate": 0.0002,
6424
+ "loss": 0.7246,
6425
+ "step": 6790
6426
+ },
6427
+ {
6428
+ "epoch": 1.65,
6429
+ "learning_rate": 0.0002,
6430
+ "loss": 0.6964,
6431
+ "step": 6800
6432
+ },
6433
+ {
6434
+ "epoch": 1.65,
6435
+ "eval_loss": 0.7599997520446777,
6436
+ "eval_runtime": 146.8712,
6437
+ "eval_samples_per_second": 6.809,
6438
+ "eval_steps_per_second": 3.404,
6439
+ "step": 6800
6440
+ },
6441
+ {
6442
+ "epoch": 1.65,
6443
+ "mmlu_eval_accuracy": 0.4884868609859884,
6444
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
6445
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6446
+ "mmlu_eval_accuracy_astronomy": 0.375,
6447
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6448
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5862068965517241,
6449
+ "mmlu_eval_accuracy_college_biology": 0.4375,
6450
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
6451
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6452
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6453
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
6454
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6455
+ "mmlu_eval_accuracy_computer_security": 0.7272727272727273,
6456
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6457
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6458
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6459
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
6460
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
6461
+ "mmlu_eval_accuracy_global_facts": 0.5,
6462
+ "mmlu_eval_accuracy_high_school_biology": 0.4375,
6463
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
6464
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6465
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
6466
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
6467
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
6468
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
6469
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
6470
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
6471
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
6472
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
6473
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
6474
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
6475
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
6476
+ "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
6477
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6478
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6479
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
6480
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
6481
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
6482
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
6483
+ "mmlu_eval_accuracy_marketing": 0.76,
6484
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
6485
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
6486
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
6487
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
6488
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
6489
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
6490
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
6491
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
6492
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
6493
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
6494
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
6495
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
6496
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
6497
+ "mmlu_eval_accuracy_sociology": 0.5909090909090909,
6498
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6499
+ "mmlu_eval_accuracy_virology": 0.6111111111111112,
6500
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6501
+ "mmlu_loss": 1.128095783546139,
6502
+ "step": 6800
6503
+ },
6504
+ {
6505
+ "epoch": 1.65,
6506
+ "learning_rate": 0.0002,
6507
+ "loss": 0.6226,
6508
+ "step": 6810
6509
+ },
6510
+ {
6511
+ "epoch": 1.65,
6512
+ "learning_rate": 0.0002,
6513
+ "loss": 0.66,
6514
+ "step": 6820
6515
+ },
6516
+ {
6517
+ "epoch": 1.65,
6518
+ "learning_rate": 0.0002,
6519
+ "loss": 0.7219,
6520
+ "step": 6830
6521
+ },
6522
+ {
6523
+ "epoch": 1.66,
6524
+ "learning_rate": 0.0002,
6525
+ "loss": 0.7092,
6526
+ "step": 6840
6527
+ },
6528
+ {
6529
+ "epoch": 1.66,
6530
+ "learning_rate": 0.0002,
6531
+ "loss": 0.7236,
6532
+ "step": 6850
6533
+ },
6534
+ {
6535
+ "epoch": 1.66,
6536
+ "learning_rate": 0.0002,
6537
+ "loss": 0.7054,
6538
+ "step": 6860
6539
+ },
6540
+ {
6541
+ "epoch": 1.66,
6542
+ "learning_rate": 0.0002,
6543
+ "loss": 0.6618,
6544
+ "step": 6870
6545
+ },
6546
+ {
6547
+ "epoch": 1.67,
6548
+ "learning_rate": 0.0002,
6549
+ "loss": 0.6606,
6550
+ "step": 6880
6551
+ },
6552
+ {
6553
+ "epoch": 1.67,
6554
+ "learning_rate": 0.0002,
6555
+ "loss": 0.7409,
6556
+ "step": 6890
6557
+ },
6558
+ {
6559
+ "epoch": 1.67,
6560
+ "learning_rate": 0.0002,
6561
+ "loss": 0.6906,
6562
+ "step": 6900
6563
+ },
6564
+ {
6565
+ "epoch": 1.67,
6566
+ "learning_rate": 0.0002,
6567
+ "loss": 0.7061,
6568
+ "step": 6910
6569
+ },
6570
+ {
6571
+ "epoch": 1.67,
6572
+ "learning_rate": 0.0002,
6573
+ "loss": 0.6416,
6574
+ "step": 6920
6575
+ },
6576
+ {
6577
+ "epoch": 1.68,
6578
+ "learning_rate": 0.0002,
6579
+ "loss": 0.649,
6580
+ "step": 6930
6581
+ },
6582
+ {
6583
+ "epoch": 1.68,
6584
+ "learning_rate": 0.0002,
6585
+ "loss": 0.7244,
6586
+ "step": 6940
6587
+ },
6588
+ {
6589
+ "epoch": 1.68,
6590
+ "learning_rate": 0.0002,
6591
+ "loss": 0.654,
6592
+ "step": 6950
6593
+ },
6594
+ {
6595
+ "epoch": 1.68,
6596
+ "learning_rate": 0.0002,
6597
+ "loss": 0.6884,
6598
+ "step": 6960
6599
+ },
6600
+ {
6601
+ "epoch": 1.69,
6602
+ "learning_rate": 0.0002,
6603
+ "loss": 0.6297,
6604
+ "step": 6970
6605
+ },
6606
+ {
6607
+ "epoch": 1.69,
6608
+ "learning_rate": 0.0002,
6609
+ "loss": 0.6346,
6610
+ "step": 6980
6611
+ },
6612
+ {
6613
+ "epoch": 1.69,
6614
+ "learning_rate": 0.0002,
6615
+ "loss": 0.6196,
6616
+ "step": 6990
6617
+ },
6618
+ {
6619
+ "epoch": 1.69,
6620
+ "learning_rate": 0.0002,
6621
+ "loss": 0.6725,
6622
+ "step": 7000
6623
+ },
6624
+ {
6625
+ "epoch": 1.69,
6626
+ "eval_loss": 0.7621538639068604,
6627
+ "eval_runtime": 146.8049,
6628
+ "eval_samples_per_second": 6.812,
6629
+ "eval_steps_per_second": 3.406,
6630
+ "step": 7000
6631
+ },
6632
+ {
6633
+ "epoch": 1.69,
6634
+ "mmlu_eval_accuracy": 0.4835875989487375,
6635
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
6636
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6637
+ "mmlu_eval_accuracy_astronomy": 0.375,
6638
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
6639
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
6640
+ "mmlu_eval_accuracy_college_biology": 0.4375,
6641
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
6642
+ "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
6643
+ "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
6644
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
6645
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
6646
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
6647
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
6648
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6649
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
6650
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
6651
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
6652
+ "mmlu_eval_accuracy_global_facts": 0.2,
6653
+ "mmlu_eval_accuracy_high_school_biology": 0.4375,
6654
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
6655
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6656
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
6657
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
6658
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6659
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
6660
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
6661
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
6662
+ "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
6663
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
6664
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
6665
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
6666
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
6667
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
6668
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6669
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6670
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6671
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
6672
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
6673
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
6674
+ "mmlu_eval_accuracy_marketing": 0.84,
6675
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6676
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
6677
+ "mmlu_eval_accuracy_moral_disputes": 0.39473684210526316,
6678
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
6679
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
6680
+ "mmlu_eval_accuracy_philosophy": 0.5,
6681
+ "mmlu_eval_accuracy_prehistory": 0.6,
6682
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
6683
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
6684
+ "mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
6685
+ "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
6686
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
6687
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
6688
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
6689
+ "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
6690
+ "mmlu_eval_accuracy_virology": 0.6111111111111112,
6691
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6692
+ "mmlu_loss": 1.1675736687824558,
6693
+ "step": 7000
6694
+ },
6695
+ {
6696
+ "epoch": 1.7,
6697
+ "learning_rate": 0.0002,
6698
+ "loss": 0.6509,
6699
+ "step": 7010
6700
+ },
6701
+ {
6702
+ "epoch": 1.7,
6703
+ "learning_rate": 0.0002,
6704
+ "loss": 0.6814,
6705
+ "step": 7020
6706
+ },
6707
+ {
6708
+ "epoch": 1.7,
6709
+ "learning_rate": 0.0002,
6710
+ "loss": 0.6802,
6711
+ "step": 7030
6712
+ },
6713
+ {
6714
+ "epoch": 1.7,
6715
+ "learning_rate": 0.0002,
6716
+ "loss": 0.6031,
6717
+ "step": 7040
6718
+ },
6719
+ {
6720
+ "epoch": 1.71,
6721
+ "learning_rate": 0.0002,
6722
+ "loss": 0.6157,
6723
+ "step": 7050
6724
+ },
6725
+ {
6726
+ "epoch": 1.71,
6727
+ "learning_rate": 0.0002,
6728
+ "loss": 0.7485,
6729
+ "step": 7060
6730
+ },
6731
+ {
6732
+ "epoch": 1.71,
6733
+ "learning_rate": 0.0002,
6734
+ "loss": 0.6672,
6735
+ "step": 7070
6736
+ },
6737
+ {
6738
+ "epoch": 1.71,
6739
+ "learning_rate": 0.0002,
6740
+ "loss": 0.6681,
6741
+ "step": 7080
6742
+ },
6743
+ {
6744
+ "epoch": 1.72,
6745
+ "learning_rate": 0.0002,
6746
+ "loss": 0.7042,
6747
+ "step": 7090
6748
+ },
6749
+ {
6750
+ "epoch": 1.72,
6751
+ "learning_rate": 0.0002,
6752
+ "loss": 0.6757,
6753
+ "step": 7100
6754
+ },
6755
+ {
6756
+ "epoch": 1.72,
6757
+ "learning_rate": 0.0002,
6758
+ "loss": 0.6393,
6759
+ "step": 7110
6760
+ },
6761
+ {
6762
+ "epoch": 1.72,
6763
+ "learning_rate": 0.0002,
6764
+ "loss": 0.7289,
6765
+ "step": 7120
6766
+ },
6767
+ {
6768
+ "epoch": 1.73,
6769
+ "learning_rate": 0.0002,
6770
+ "loss": 0.6166,
6771
+ "step": 7130
6772
+ },
6773
+ {
6774
+ "epoch": 1.73,
6775
+ "learning_rate": 0.0002,
6776
+ "loss": 0.6704,
6777
+ "step": 7140
6778
+ },
6779
+ {
6780
+ "epoch": 1.73,
6781
+ "learning_rate": 0.0002,
6782
+ "loss": 0.631,
6783
+ "step": 7150
6784
+ },
6785
+ {
6786
+ "epoch": 1.73,
6787
+ "learning_rate": 0.0002,
6788
+ "loss": 0.6736,
6789
+ "step": 7160
6790
+ },
6791
+ {
6792
+ "epoch": 1.74,
6793
+ "learning_rate": 0.0002,
6794
+ "loss": 0.6942,
6795
+ "step": 7170
6796
+ },
6797
+ {
6798
+ "epoch": 1.74,
6799
+ "learning_rate": 0.0002,
6800
+ "loss": 0.6573,
6801
+ "step": 7180
6802
+ },
6803
+ {
6804
+ "epoch": 1.74,
6805
+ "learning_rate": 0.0002,
6806
+ "loss": 0.6597,
6807
+ "step": 7190
6808
+ },
6809
+ {
6810
+ "epoch": 1.74,
6811
+ "learning_rate": 0.0002,
6812
+ "loss": 0.7386,
6813
+ "step": 7200
6814
+ },
6815
+ {
6816
+ "epoch": 1.74,
6817
+ "eval_loss": 0.7593604326248169,
6818
+ "eval_runtime": 146.9956,
6819
+ "eval_samples_per_second": 6.803,
6820
+ "eval_steps_per_second": 3.401,
6821
+ "step": 7200
6822
+ },
6823
+ {
6824
+ "epoch": 1.74,
6825
+ "mmlu_eval_accuracy": 0.488908494213702,
6826
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
6827
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6828
+ "mmlu_eval_accuracy_astronomy": 0.5,
6829
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
6830
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
6831
+ "mmlu_eval_accuracy_college_biology": 0.375,
6832
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
6833
+ "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
6834
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
6835
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
6836
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6837
+ "mmlu_eval_accuracy_computer_security": 0.6363636363636364,
6838
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
6839
+ "mmlu_eval_accuracy_econometrics": 0.25,
6840
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6841
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
6842
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
6843
+ "mmlu_eval_accuracy_global_facts": 0.5,
6844
+ "mmlu_eval_accuracy_high_school_biology": 0.4375,
6845
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
6846
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6847
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
6848
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
6849
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
6850
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
6851
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
6852
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
6853
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
6854
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
6855
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
6856
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
6857
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
6858
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
6859
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6860
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6861
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6862
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
6863
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
6864
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
6865
+ "mmlu_eval_accuracy_marketing": 0.76,
6866
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6867
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
6868
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
6869
+ "mmlu_eval_accuracy_moral_scenarios": 0.22,
6870
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
6871
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
6872
+ "mmlu_eval_accuracy_prehistory": 0.5714285714285714,
6873
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
6874
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
6875
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
6876
+ "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
6877
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
6878
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
6879
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
6880
+ "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
6881
+ "mmlu_eval_accuracy_virology": 0.6111111111111112,
6882
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6883
+ "mmlu_loss": 1.2372056163711274,
6884
+ "step": 7200
6885
+ },
6886
+ {
6887
+ "epoch": 1.74,
6888
+ "learning_rate": 0.0002,
6889
+ "loss": 0.7351,
6890
+ "step": 7210
6891
+ },
6892
+ {
6893
+ "epoch": 1.75,
6894
+ "learning_rate": 0.0002,
6895
+ "loss": 0.7823,
6896
+ "step": 7220
6897
+ },
6898
+ {
6899
+ "epoch": 1.75,
6900
+ "learning_rate": 0.0002,
6901
+ "loss": 0.5742,
6902
+ "step": 7230
6903
+ },
6904
+ {
6905
+ "epoch": 1.75,
6906
+ "learning_rate": 0.0002,
6907
+ "loss": 0.7067,
6908
+ "step": 7240
6909
+ },
6910
+ {
6911
+ "epoch": 1.75,
6912
+ "learning_rate": 0.0002,
6913
+ "loss": 0.7058,
6914
+ "step": 7250
6915
+ },
6916
+ {
6917
+ "epoch": 1.76,
6918
+ "learning_rate": 0.0002,
6919
+ "loss": 0.6627,
6920
+ "step": 7260
6921
+ },
6922
+ {
6923
+ "epoch": 1.76,
6924
+ "learning_rate": 0.0002,
6925
+ "loss": 0.5715,
6926
+ "step": 7270
6927
+ },
6928
+ {
6929
+ "epoch": 1.76,
6930
+ "learning_rate": 0.0002,
6931
+ "loss": 0.6729,
6932
+ "step": 7280
6933
+ },
6934
+ {
6935
+ "epoch": 1.76,
6936
+ "learning_rate": 0.0002,
6937
+ "loss": 0.6255,
6938
+ "step": 7290
6939
+ },
6940
+ {
6941
+ "epoch": 1.77,
6942
+ "learning_rate": 0.0002,
6943
+ "loss": 0.618,
6944
+ "step": 7300
6945
+ },
6946
+ {
6947
+ "epoch": 1.77,
6948
+ "learning_rate": 0.0002,
6949
+ "loss": 0.7364,
6950
+ "step": 7310
6951
+ },
6952
+ {
6953
+ "epoch": 1.77,
6954
+ "learning_rate": 0.0002,
6955
+ "loss": 0.7115,
6956
+ "step": 7320
6957
+ },
6958
+ {
6959
+ "epoch": 1.77,
6960
+ "learning_rate": 0.0002,
6961
+ "loss": 0.6601,
6962
+ "step": 7330
6963
+ },
6964
+ {
6965
+ "epoch": 1.78,
6966
+ "learning_rate": 0.0002,
6967
+ "loss": 0.617,
6968
+ "step": 7340
6969
+ },
6970
+ {
6971
+ "epoch": 1.78,
6972
+ "learning_rate": 0.0002,
6973
+ "loss": 0.5936,
6974
+ "step": 7350
6975
+ },
6976
+ {
6977
+ "epoch": 1.78,
6978
+ "learning_rate": 0.0002,
6979
+ "loss": 0.6842,
6980
+ "step": 7360
6981
+ },
6982
+ {
6983
+ "epoch": 1.78,
6984
+ "learning_rate": 0.0002,
6985
+ "loss": 0.6823,
6986
+ "step": 7370
6987
+ },
6988
+ {
6989
+ "epoch": 1.79,
6990
+ "learning_rate": 0.0002,
6991
+ "loss": 0.6603,
6992
+ "step": 7380
6993
+ },
6994
+ {
6995
+ "epoch": 1.79,
6996
+ "learning_rate": 0.0002,
6997
+ "loss": 0.7232,
6998
+ "step": 7390
6999
+ },
7000
+ {
7001
+ "epoch": 1.79,
7002
+ "learning_rate": 0.0002,
7003
+ "loss": 0.6867,
7004
+ "step": 7400
7005
+ },
7006
+ {
7007
+ "epoch": 1.79,
7008
+ "eval_loss": 0.7598956227302551,
7009
+ "eval_runtime": 146.929,
7010
+ "eval_samples_per_second": 6.806,
7011
+ "eval_steps_per_second": 3.403,
7012
+ "step": 7400
7013
+ },
7014
+ {
7015
+ "epoch": 1.79,
7016
+ "mmlu_eval_accuracy": 0.48278702228597276,
7017
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7018
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
7019
+ "mmlu_eval_accuracy_astronomy": 0.3125,
7020
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7021
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5862068965517241,
7022
+ "mmlu_eval_accuracy_college_biology": 0.375,
7023
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7024
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7025
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
7026
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
7027
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
7028
+ "mmlu_eval_accuracy_computer_security": 0.6363636363636364,
7029
+ "mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
7030
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7031
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
7032
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
7033
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
7034
+ "mmlu_eval_accuracy_global_facts": 0.4,
7035
+ "mmlu_eval_accuracy_high_school_biology": 0.46875,
7036
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
7037
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7038
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
7039
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
7040
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7041
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
7042
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
7043
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
7044
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
7045
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
7046
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
7047
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7048
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7049
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
7050
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
7051
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7052
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7053
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7054
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
7055
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
7056
+ "mmlu_eval_accuracy_marketing": 0.76,
7057
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
7058
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
7059
+ "mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
7060
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
7061
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
7062
+ "mmlu_eval_accuracy_philosophy": 0.38235294117647056,
7063
+ "mmlu_eval_accuracy_prehistory": 0.6,
7064
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
7065
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
7066
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
7067
+ "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
7068
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
7069
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
7070
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7071
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7072
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
7073
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7074
+ "mmlu_loss": 1.250969376522007,
7075
+ "step": 7400
7076
+ },
7077
+ {
7078
+ "epoch": 1.79,
7079
+ "learning_rate": 0.0002,
7080
+ "loss": 0.6575,
7081
+ "step": 7410
7082
+ },
7083
+ {
7084
+ "epoch": 1.8,
7085
+ "learning_rate": 0.0002,
7086
+ "loss": 0.7154,
7087
+ "step": 7420
7088
+ },
7089
+ {
7090
+ "epoch": 1.8,
7091
+ "learning_rate": 0.0002,
7092
+ "loss": 0.6546,
7093
+ "step": 7430
7094
+ },
7095
+ {
7096
+ "epoch": 1.8,
7097
+ "learning_rate": 0.0002,
7098
+ "loss": 0.6694,
7099
+ "step": 7440
7100
+ },
7101
+ {
7102
+ "epoch": 1.8,
7103
+ "learning_rate": 0.0002,
7104
+ "loss": 0.7499,
7105
+ "step": 7450
7106
+ },
7107
+ {
7108
+ "epoch": 1.81,
7109
+ "learning_rate": 0.0002,
7110
+ "loss": 0.6388,
7111
+ "step": 7460
7112
+ },
7113
+ {
7114
+ "epoch": 1.81,
7115
+ "learning_rate": 0.0002,
7116
+ "loss": 0.6882,
7117
+ "step": 7470
7118
+ },
7119
+ {
7120
+ "epoch": 1.81,
7121
+ "learning_rate": 0.0002,
7122
+ "loss": 0.7236,
7123
+ "step": 7480
7124
+ },
7125
+ {
7126
+ "epoch": 1.81,
7127
+ "learning_rate": 0.0002,
7128
+ "loss": 0.664,
7129
+ "step": 7490
7130
+ },
7131
+ {
7132
+ "epoch": 1.82,
7133
+ "learning_rate": 0.0002,
7134
+ "loss": 0.6824,
7135
+ "step": 7500
7136
+ },
7137
+ {
7138
+ "epoch": 1.82,
7139
+ "learning_rate": 0.0002,
7140
+ "loss": 0.6791,
7141
+ "step": 7510
7142
+ },
7143
+ {
7144
+ "epoch": 1.82,
7145
+ "learning_rate": 0.0002,
7146
+ "loss": 0.7206,
7147
+ "step": 7520
7148
+ },
7149
+ {
7150
+ "epoch": 1.82,
7151
+ "learning_rate": 0.0002,
7152
+ "loss": 0.692,
7153
+ "step": 7530
7154
+ },
7155
+ {
7156
+ "epoch": 1.82,
7157
+ "learning_rate": 0.0002,
7158
+ "loss": 0.6443,
7159
+ "step": 7540
7160
+ },
7161
+ {
7162
+ "epoch": 1.83,
7163
+ "learning_rate": 0.0002,
7164
+ "loss": 0.6652,
7165
+ "step": 7550
7166
+ },
7167
+ {
7168
+ "epoch": 1.83,
7169
+ "learning_rate": 0.0002,
7170
+ "loss": 0.6978,
7171
+ "step": 7560
7172
+ },
7173
+ {
7174
+ "epoch": 1.83,
7175
+ "learning_rate": 0.0002,
7176
+ "loss": 0.654,
7177
+ "step": 7570
7178
+ },
7179
+ {
7180
+ "epoch": 1.83,
7181
+ "learning_rate": 0.0002,
7182
+ "loss": 0.6733,
7183
+ "step": 7580
7184
+ },
7185
+ {
7186
+ "epoch": 1.84,
7187
+ "learning_rate": 0.0002,
7188
+ "loss": 0.7104,
7189
+ "step": 7590
7190
+ },
7191
+ {
7192
+ "epoch": 1.84,
7193
+ "learning_rate": 0.0002,
7194
+ "loss": 0.723,
7195
+ "step": 7600
7196
+ },
7197
+ {
7198
+ "epoch": 1.84,
7199
+ "eval_loss": 0.7613999247550964,
7200
+ "eval_runtime": 146.9457,
7201
+ "eval_samples_per_second": 6.805,
7202
+ "eval_steps_per_second": 3.403,
7203
+ "step": 7600
7204
+ },
7205
+ {
7206
+ "epoch": 1.84,
7207
+ "mmlu_eval_accuracy": 0.4899117351100044,
7208
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7209
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7210
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7211
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7212
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
7213
+ "mmlu_eval_accuracy_college_biology": 0.375,
7214
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
7215
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7216
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
7217
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7218
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
7219
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7220
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
7221
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7222
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
7223
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
7224
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
7225
+ "mmlu_eval_accuracy_global_facts": 0.5,
7226
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7227
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
7228
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7229
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7230
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
7231
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7232
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
7233
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
7234
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
7235
+ "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
7236
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
7237
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
7238
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7239
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7240
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
7241
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
7242
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7243
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7244
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7245
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
7246
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
7247
+ "mmlu_eval_accuracy_marketing": 0.8,
7248
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7249
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
7250
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
7251
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
7252
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
7253
+ "mmlu_eval_accuracy_philosophy": 0.4117647058823529,
7254
+ "mmlu_eval_accuracy_prehistory": 0.6,
7255
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
7256
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
7257
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
7258
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
7259
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
7260
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
7261
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
7262
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7263
+ "mmlu_eval_accuracy_virology": 0.6111111111111112,
7264
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
7265
+ "mmlu_loss": 1.260864307029129,
7266
+ "step": 7600
7267
+ },
7268
+ {
7269
+ "epoch": 1.84,
7270
+ "learning_rate": 0.0002,
7271
+ "loss": 0.6886,
7272
+ "step": 7610
7273
+ },
7274
+ {
7275
+ "epoch": 1.84,
7276
+ "learning_rate": 0.0002,
7277
+ "loss": 0.7028,
7278
+ "step": 7620
7279
+ },
7280
+ {
7281
+ "epoch": 1.85,
7282
+ "learning_rate": 0.0002,
7283
+ "loss": 0.6053,
7284
+ "step": 7630
7285
+ },
7286
+ {
7287
+ "epoch": 1.85,
7288
+ "learning_rate": 0.0002,
7289
+ "loss": 0.6817,
7290
+ "step": 7640
7291
+ },
7292
+ {
7293
+ "epoch": 1.85,
7294
+ "learning_rate": 0.0002,
7295
+ "loss": 0.7574,
7296
+ "step": 7650
7297
+ },
7298
+ {
7299
+ "epoch": 1.85,
7300
+ "learning_rate": 0.0002,
7301
+ "loss": 0.6234,
7302
+ "step": 7660
7303
+ },
7304
+ {
7305
+ "epoch": 1.86,
7306
+ "learning_rate": 0.0002,
7307
+ "loss": 0.7127,
7308
+ "step": 7670
7309
+ },
7310
+ {
7311
+ "epoch": 1.86,
7312
+ "learning_rate": 0.0002,
7313
+ "loss": 0.6868,
7314
+ "step": 7680
7315
+ },
7316
+ {
7317
+ "epoch": 1.86,
7318
+ "learning_rate": 0.0002,
7319
+ "loss": 0.7013,
7320
+ "step": 7690
7321
+ },
7322
+ {
7323
+ "epoch": 1.86,
7324
+ "learning_rate": 0.0002,
7325
+ "loss": 0.6305,
7326
+ "step": 7700
7327
+ },
7328
+ {
7329
+ "epoch": 1.87,
7330
+ "learning_rate": 0.0002,
7331
+ "loss": 0.7254,
7332
+ "step": 7710
7333
+ },
7334
+ {
7335
+ "epoch": 1.87,
7336
+ "learning_rate": 0.0002,
7337
+ "loss": 0.7005,
7338
+ "step": 7720
7339
+ },
7340
+ {
7341
+ "epoch": 1.87,
7342
+ "learning_rate": 0.0002,
7343
+ "loss": 0.7288,
7344
+ "step": 7730
7345
+ },
7346
+ {
7347
+ "epoch": 1.87,
7348
+ "learning_rate": 0.0002,
7349
+ "loss": 0.7573,
7350
+ "step": 7740
7351
+ },
7352
+ {
7353
+ "epoch": 1.88,
7354
+ "learning_rate": 0.0002,
7355
+ "loss": 0.5988,
7356
+ "step": 7750
7357
+ },
7358
+ {
7359
+ "epoch": 1.88,
7360
+ "learning_rate": 0.0002,
7361
+ "loss": 0.7662,
7362
+ "step": 7760
7363
+ },
7364
+ {
7365
+ "epoch": 1.88,
7366
+ "learning_rate": 0.0002,
7367
+ "loss": 0.6742,
7368
+ "step": 7770
7369
+ },
7370
+ {
7371
+ "epoch": 1.88,
7372
+ "learning_rate": 0.0002,
7373
+ "loss": 0.6525,
7374
+ "step": 7780
7375
+ },
7376
+ {
7377
+ "epoch": 1.89,
7378
+ "learning_rate": 0.0002,
7379
+ "loss": 0.6918,
7380
+ "step": 7790
7381
+ },
7382
+ {
7383
+ "epoch": 1.89,
7384
+ "learning_rate": 0.0002,
7385
+ "loss": 0.6998,
7386
+ "step": 7800
7387
+ },
7388
+ {
7389
+ "epoch": 1.89,
7390
+ "eval_loss": 0.7608389854431152,
7391
+ "eval_runtime": 146.9045,
7392
+ "eval_samples_per_second": 6.807,
7393
+ "eval_steps_per_second": 3.404,
7394
+ "step": 7800
7395
+ },
7396
+ {
7397
+ "epoch": 1.89,
7398
+ "mmlu_eval_accuracy": 0.4877363525289947,
7399
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7400
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
7401
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7402
+ "mmlu_eval_accuracy_business_ethics": 0.36363636363636365,
7403
+ "mmlu_eval_accuracy_clinical_knowledge": 0.6206896551724138,
7404
+ "mmlu_eval_accuracy_college_biology": 0.375,
7405
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7406
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7407
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
7408
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7409
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
7410
+ "mmlu_eval_accuracy_computer_security": 0.6363636363636364,
7411
+ "mmlu_eval_accuracy_conceptual_physics": 0.3076923076923077,
7412
+ "mmlu_eval_accuracy_econometrics": 0.25,
7413
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7414
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
7415
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
7416
+ "mmlu_eval_accuracy_global_facts": 0.5,
7417
+ "mmlu_eval_accuracy_high_school_biology": 0.5,
7418
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
7419
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7420
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
7421
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7422
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7423
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
7424
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
7425
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
7426
+ "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
7427
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
7428
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
7429
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
7430
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7431
+ "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
7432
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
7433
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7434
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7435
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7436
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
7437
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7438
+ "mmlu_eval_accuracy_marketing": 0.76,
7439
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
7440
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
7441
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
7442
+ "mmlu_eval_accuracy_moral_scenarios": 0.28,
7443
+ "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
7444
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
7445
+ "mmlu_eval_accuracy_prehistory": 0.6,
7446
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
7447
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
7448
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
7449
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
7450
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
7451
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
7452
+ "mmlu_eval_accuracy_sociology": 0.5909090909090909,
7453
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7454
+ "mmlu_eval_accuracy_virology": 0.5,
7455
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7456
+ "mmlu_loss": 1.0666804730931088,
7457
+ "step": 7800
7458
+ },
7459
+ {
7460
+ "epoch": 1.89,
7461
+ "learning_rate": 0.0002,
7462
+ "loss": 0.6605,
7463
+ "step": 7810
7464
+ },
7465
+ {
7466
+ "epoch": 1.89,
7467
+ "learning_rate": 0.0002,
7468
+ "loss": 0.7623,
7469
+ "step": 7820
7470
+ },
7471
+ {
7472
+ "epoch": 1.89,
7473
+ "learning_rate": 0.0002,
7474
+ "loss": 0.788,
7475
+ "step": 7830
7476
+ },
7477
+ {
7478
+ "epoch": 1.9,
7479
+ "learning_rate": 0.0002,
7480
+ "loss": 0.76,
7481
+ "step": 7840
7482
+ },
7483
+ {
7484
+ "epoch": 1.9,
7485
+ "learning_rate": 0.0002,
7486
+ "loss": 0.7379,
7487
+ "step": 7850
7488
+ },
7489
+ {
7490
+ "epoch": 1.9,
7491
+ "learning_rate": 0.0002,
7492
+ "loss": 0.6891,
7493
+ "step": 7860
7494
+ },
7495
+ {
7496
+ "epoch": 1.9,
7497
+ "learning_rate": 0.0002,
7498
+ "loss": 0.7336,
7499
+ "step": 7870
7500
+ },
7501
+ {
7502
+ "epoch": 1.91,
7503
+ "learning_rate": 0.0002,
7504
+ "loss": 0.6093,
7505
+ "step": 7880
7506
+ },
7507
+ {
7508
+ "epoch": 1.91,
7509
+ "learning_rate": 0.0002,
7510
+ "loss": 0.6738,
7511
+ "step": 7890
7512
+ },
7513
+ {
7514
+ "epoch": 1.91,
7515
+ "learning_rate": 0.0002,
7516
+ "loss": 0.654,
7517
+ "step": 7900
7518
+ },
7519
+ {
7520
+ "epoch": 1.91,
7521
+ "learning_rate": 0.0002,
7522
+ "loss": 0.7356,
7523
+ "step": 7910
7524
+ },
7525
+ {
7526
+ "epoch": 1.92,
7527
+ "learning_rate": 0.0002,
7528
+ "loss": 0.6438,
7529
+ "step": 7920
7530
+ },
7531
+ {
7532
+ "epoch": 1.92,
7533
+ "learning_rate": 0.0002,
7534
+ "loss": 0.6108,
7535
+ "step": 7930
7536
+ },
7537
+ {
7538
+ "epoch": 1.92,
7539
+ "learning_rate": 0.0002,
7540
+ "loss": 0.6916,
7541
+ "step": 7940
7542
+ },
7543
+ {
7544
+ "epoch": 1.92,
7545
+ "learning_rate": 0.0002,
7546
+ "loss": 0.6645,
7547
+ "step": 7950
7548
+ },
7549
+ {
7550
+ "epoch": 1.93,
7551
+ "learning_rate": 0.0002,
7552
+ "loss": 0.6785,
7553
+ "step": 7960
7554
+ },
7555
+ {
7556
+ "epoch": 1.93,
7557
+ "learning_rate": 0.0002,
7558
+ "loss": 0.6541,
7559
+ "step": 7970
7560
+ },
7561
+ {
7562
+ "epoch": 1.93,
7563
+ "learning_rate": 0.0002,
7564
+ "loss": 0.6427,
7565
+ "step": 7980
7566
+ },
7567
+ {
7568
+ "epoch": 1.93,
7569
+ "learning_rate": 0.0002,
7570
+ "loss": 0.7183,
7571
+ "step": 7990
7572
+ },
7573
+ {
7574
+ "epoch": 1.94,
7575
+ "learning_rate": 0.0002,
7576
+ "loss": 0.6713,
7577
+ "step": 8000
7578
+ },
7579
+ {
7580
+ "epoch": 1.94,
7581
+ "eval_loss": 0.7599592804908752,
7582
+ "eval_runtime": 150.9203,
7583
+ "eval_samples_per_second": 6.626,
7584
+ "eval_steps_per_second": 3.313,
7585
+ "step": 8000
7586
+ },
7587
+ {
7588
+ "epoch": 1.94,
7589
+ "mmlu_eval_accuracy": 0.5021156119500032,
7590
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
7591
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
7592
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7593
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7594
+ "mmlu_eval_accuracy_clinical_knowledge": 0.6206896551724138,
7595
+ "mmlu_eval_accuracy_college_biology": 0.4375,
7596
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7597
+ "mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
7598
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
7599
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
7600
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
7601
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7602
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7603
+ "mmlu_eval_accuracy_econometrics": 0.25,
7604
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7605
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
7606
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
7607
+ "mmlu_eval_accuracy_global_facts": 0.5,
7608
+ "mmlu_eval_accuracy_high_school_biology": 0.46875,
7609
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
7610
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7611
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7612
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7613
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7614
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
7615
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
7616
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
7617
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
7618
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
7619
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
7620
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7621
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7622
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
7623
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
7624
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7625
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
7626
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7627
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
7628
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7629
+ "mmlu_eval_accuracy_marketing": 0.76,
7630
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
7631
+ "mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
7632
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
7633
+ "mmlu_eval_accuracy_moral_scenarios": 0.26,
7634
+ "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
7635
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
7636
+ "mmlu_eval_accuracy_prehistory": 0.5714285714285714,
7637
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
7638
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
7639
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
7640
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
7641
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
7642
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
7643
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
7644
+ "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
7645
+ "mmlu_eval_accuracy_virology": 0.5,
7646
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
7647
+ "mmlu_loss": 1.0658438428652817,
7648
+ "step": 8000
7649
+ },
7650
+ {
7651
+ "epoch": 1.94,
7652
+ "learning_rate": 0.0002,
7653
+ "loss": 0.6641,
7654
+ "step": 8010
7655
+ },
7656
+ {
7657
+ "epoch": 1.94,
7658
+ "learning_rate": 0.0002,
7659
+ "loss": 0.7042,
7660
+ "step": 8020
7661
+ },
7662
+ {
7663
+ "epoch": 1.94,
7664
+ "learning_rate": 0.0002,
7665
+ "loss": 0.69,
7666
+ "step": 8030
7667
+ },
7668
+ {
7669
+ "epoch": 1.95,
7670
+ "learning_rate": 0.0002,
7671
+ "loss": 0.7099,
7672
+ "step": 8040
7673
+ },
7674
+ {
7675
+ "epoch": 1.95,
7676
+ "learning_rate": 0.0002,
7677
+ "loss": 0.7717,
7678
+ "step": 8050
7679
+ },
7680
+ {
7681
+ "epoch": 1.95,
7682
+ "learning_rate": 0.0002,
7683
+ "loss": 0.5697,
7684
+ "step": 8060
7685
+ },
7686
+ {
7687
+ "epoch": 1.95,
7688
+ "learning_rate": 0.0002,
7689
+ "loss": 0.6925,
7690
+ "step": 8070
7691
+ },
7692
+ {
7693
+ "epoch": 1.96,
7694
+ "learning_rate": 0.0002,
7695
+ "loss": 0.6483,
7696
+ "step": 8080
7697
+ },
7698
+ {
7699
+ "epoch": 1.96,
7700
+ "learning_rate": 0.0002,
7701
+ "loss": 0.6367,
7702
+ "step": 8090
7703
+ },
7704
+ {
7705
+ "epoch": 1.96,
7706
+ "learning_rate": 0.0002,
7707
+ "loss": 0.6954,
7708
+ "step": 8100
7709
+ },
7710
+ {
7711
+ "epoch": 1.96,
7712
+ "learning_rate": 0.0002,
7713
+ "loss": 0.656,
7714
+ "step": 8110
7715
+ },
7716
+ {
7717
+ "epoch": 1.97,
7718
+ "learning_rate": 0.0002,
7719
+ "loss": 0.6329,
7720
+ "step": 8120
7721
+ },
7722
+ {
7723
+ "epoch": 1.97,
7724
+ "learning_rate": 0.0002,
7725
+ "loss": 0.707,
7726
+ "step": 8130
7727
+ },
7728
+ {
7729
+ "epoch": 1.97,
7730
+ "learning_rate": 0.0002,
7731
+ "loss": 0.6741,
7732
+ "step": 8140
7733
+ },
7734
+ {
7735
+ "epoch": 1.97,
7736
+ "learning_rate": 0.0002,
7737
+ "loss": 0.6841,
7738
+ "step": 8150
7739
+ },
7740
+ {
7741
+ "epoch": 1.97,
7742
+ "learning_rate": 0.0002,
7743
+ "loss": 0.645,
7744
+ "step": 8160
7745
+ },
7746
+ {
7747
+ "epoch": 1.98,
7748
+ "learning_rate": 0.0002,
7749
+ "loss": 0.6663,
7750
+ "step": 8170
7751
+ },
7752
+ {
7753
+ "epoch": 1.98,
7754
+ "learning_rate": 0.0002,
7755
+ "loss": 0.6719,
7756
+ "step": 8180
7757
+ },
7758
+ {
7759
+ "epoch": 1.98,
7760
+ "learning_rate": 0.0002,
7761
+ "loss": 0.6598,
7762
+ "step": 8190
7763
+ },
7764
+ {
7765
+ "epoch": 1.98,
7766
+ "learning_rate": 0.0002,
7767
+ "loss": 0.6704,
7768
+ "step": 8200
7769
+ },
7770
+ {
7771
+ "epoch": 1.98,
7772
+ "eval_loss": 0.7602109313011169,
7773
+ "eval_runtime": 146.9593,
7774
+ "eval_samples_per_second": 6.805,
7775
+ "eval_steps_per_second": 3.402,
7776
+ "step": 8200
7777
+ },
7778
+ {
7779
+ "epoch": 1.98,
7780
+ "mmlu_eval_accuracy": 0.5005573342840559,
7781
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
7782
+ "mmlu_eval_accuracy_anatomy": 0.7857142857142857,
7783
+ "mmlu_eval_accuracy_astronomy": 0.5,
7784
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7785
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
7786
+ "mmlu_eval_accuracy_college_biology": 0.4375,
7787
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7788
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7789
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
7790
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7791
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
7792
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
7793
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7794
+ "mmlu_eval_accuracy_econometrics": 0.25,
7795
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7796
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
7797
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7798
+ "mmlu_eval_accuracy_global_facts": 0.5,
7799
+ "mmlu_eval_accuracy_high_school_biology": 0.46875,
7800
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
7801
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7802
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
7803
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7804
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7805
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
7806
+ "mmlu_eval_accuracy_high_school_mathematics": 0.3103448275862069,
7807
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
7808
+ "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
7809
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
7810
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
7811
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7812
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7813
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
7814
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
7815
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7816
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
7817
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7818
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
7819
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7820
+ "mmlu_eval_accuracy_marketing": 0.76,
7821
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
7822
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
7823
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
7824
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
7825
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
7826
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
7827
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
7828
+ "mmlu_eval_accuracy_professional_accounting": 0.1935483870967742,
7829
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
7830
+ "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
7831
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
7832
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
7833
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
7834
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7835
+ "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
7836
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
7837
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7838
+ "mmlu_loss": 1.098324341538992,
7839
+ "step": 8200
7840
  }
7841
  ],
7842
  "max_steps": 10000,
7843
  "num_train_epochs": 3,
7844
+ "total_flos": 9.085460254850089e+17,
7845
  "trial_name": null,
7846
  "trial_params": null
7847
  }
{checkpoint-6200 β†’ checkpoint-8200}/training_args.bin RENAMED
File without changes