sbhavy committed
Commit a75003d · 1 Parent(s): 38833d3

End of training

README.md CHANGED
@@ -34,12 +34,12 @@ More information needed

 The following hyperparameters were used during training:
 - learning_rate: 2e-05
- - train_batch_size: 4
+ - train_batch_size: 8
 - eval_batch_size: 8
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
- - num_epochs: 35
+ - num_epochs: 25
 - mixed_precision_training: Native AMP

 ### Training results
@@ -48,7 +48,7 @@ The following hyperparameters were used during training:

 ### Framework versions

- - Transformers 4.29.1
- - Pytorch 2.0.0+cu118
+ - Transformers 4.29.2
+ - Pytorch 2.0.1+cu118
 - Datasets 2.12.0
 - Tokenizers 0.13.3
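For context, the updated hyperparameters map one-to-one onto the Hugging Face `TrainingArguments` API. A minimal sketch under that assumption (the output directory is a placeholder, not recorded in this commit; Adam with betas=(0.9,0.999) and epsilon=1e-08 is the Trainer default, matching the optimizer line in the README):

```python
from transformers import TrainingArguments

# Sketch of the training setup implied by the updated README values.
training_args = TrainingArguments(
    output_dir="out",                # placeholder, not taken from the diff
    learning_rate=2e-05,
    per_device_train_batch_size=8,   # raised from 4 in this commit
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=25,             # lowered from 35 in this commit
    fp16=True,                       # "Native AMP" mixed-precision training
)
```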
config.json CHANGED
@@ -83,7 +83,7 @@
 "top_p": 1.0,
 "torch_dtype": null,
 "torchscript": false,
- "transformers_version": "4.29.1",
+ "transformers_version": "4.29.2",
 "typical_p": 1.0,
 "use_bfloat16": false,
 "use_cache": true,
@@ -180,7 +180,7 @@
 "top_p": 1.0,
 "torch_dtype": null,
 "torchscript": false,
- "transformers_version": "4.29.1",
+ "transformers_version": "4.29.2",
 "typical_p": 1.0,
 "use_absolute_embeddings": false,
 "use_bfloat16": false,
generation_config.json CHANGED
@@ -4,5 +4,5 @@
 "eos_token_id": 2,
 "forced_eos_token_id": 2,
 "pad_token_id": 1,
- "transformers_version": "4.29.1"
+ "transformers_version": "4.29.2"
 }
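The generation defaults above can be read back with the `GenerationConfig` API; a minimal sketch (the checkpoint path is again a placeholder):

```python
from transformers import GenerationConfig

# Load the defaults saved in generation_config.json.
gen_config = GenerationConfig.from_pretrained("path/to/checkpoint")  # placeholder
assert gen_config.eos_token_id == 2
assert gen_config.forced_eos_token_id == 2
assert gen_config.pad_token_id == 1
```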
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:8506b29381045ebcf069432e48e29a2349c03d68d1fb35afb368221b64af1659
+ oid sha256:74bb2892b4b795d251b1dd67d8457e39290a959e604d4a55eb3d560ef9480425
 size 809187097
runs/May26_16-11-13_e6352949b7c3/1685117478.2330582/events.out.tfevents.1685117478.e6352949b7c3.264.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b94961b339772255d69fab83cb39cc83a7929aec5bd2df2db9c201b195c17a2
+ size 6199
runs/May26_16-11-13_e6352949b7c3/events.out.tfevents.1685117478.e6352949b7c3.264.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d234296a207cabaea8e1fc01b2ff266e8a4cfc583a4c0107d4aefd1c7467339f
+ size 9371
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:1ed1d8987a5951f9346d8e070d67d81bc75be6a6e4913e7f48d4ff088b3a6831
+ oid sha256:2da784a8292b362ac667d70d168eb5fd34881899c5651b606f60c4d0d5b2d08d
 size 4091
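The binary diffs in this commit show Git LFS pointer files rather than the payloads: `oid` is the SHA-256 of the real file contents and `size` its byte length, which is why `pytorch_model.bin` changes its oid but keeps the same size. A minimal sketch of checking a downloaded file against its pointer (the local path is assumed):

```python
import hashlib

def sha256_of(path: str) -> str:
    """Stream a file and return its SHA-256 hex digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# oid from the training_args.bin pointer after this commit.
expected = "2da784a8292b362ac667d70d168eb5fd34881899c5651b606f60c4d0d5b2d08d"
assert sha256_of("training_args.bin") == expected  # path assumed local
```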