TeamBlueEdifai committed (verified)
Commit 55d9d36 · 1 Parent(s): 202fe19

End of training

Files changed (4):
  1. README.md +26 -24
  2. config.json +41 -15
  3. model.safetensors +2 -2
  4. training_args.bin +2 -2
README.md CHANGED
@@ -1,7 +1,7 @@
  ---
  library_name: transformers
  license: apache-2.0
- base_model: distilbert-base-uncased
+ base_model: bert-base-uncased
  tags:
  - generated_from_trainer
  metrics:
@@ -19,13 +19,15 @@ should probably proofread and complete it, then remove this comment. -->

  # results

- This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
+ This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.2282
- - Accuracy: 0.906
- - F1: 0.9052
- - Precision: 0.9048
- - Recall: 0.906
+ - Loss: 1.8481
+ - Accuracy: 0.425
+ - F1: 0.4068
+ - Precision: 0.4371
+ - Recall: 0.425
+ - Mse: 5.314
+ - Mae: 1.37

  ## Model description

@@ -48,30 +50,30 @@ The following hyperparameters were used during training:
  - train_batch_size: 32
  - eval_batch_size: 32
  - seed: 42
- - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: linear
  - num_epochs: 10
  - mixed_precision_training: Native AMP

  ### Training results

- | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall |
- |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:|
- | No log | 1.0 | 79 | 0.2501 | 0.904 | 0.9035 | 0.9032 | 0.904 |
- | 0.3965 | 2.0 | 158 | 0.2282 | 0.906 | 0.9052 | 0.9048 | 0.906 |
- | 0.1925 | 3.0 | 237 | 0.2596 | 0.9 | 0.9 | 0.9 | 0.9 |
- | 0.1348 | 4.0 | 316 | 0.3635 | 0.89 | 0.8901 | 0.8903 | 0.89 |
- | 0.1348 | 5.0 | 395 | 0.4710 | 0.88 | 0.8834 | 0.8937 | 0.88 |
- | 0.0627 | 6.0 | 474 | 0.4220 | 0.894 | 0.8928 | 0.8923 | 0.894 |
- | 0.038 | 7.0 | 553 | 0.4292 | 0.898 | 0.8969 | 0.8964 | 0.898 |
- | 0.0204 | 8.0 | 632 | 0.4625 | 0.894 | 0.8941 | 0.8943 | 0.894 |
- | 0.0148 | 9.0 | 711 | 0.4741 | 0.896 | 0.8950 | 0.8945 | 0.896 |
- | 0.0148 | 10.0 | 790 | 0.4743 | 0.896 | 0.8952 | 0.8948 | 0.896 |
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | Mse | Mae |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:|:------:|:-----:|
+ | 1.9914 | 1.0 | 157 | 1.7086 | 0.404 | 0.2561 | 0.3800 | 0.404 | 10.332 | 1.95 |
+ | 1.5651 | 2.0 | 314 | 1.6295 | 0.419 | 0.3343 | 0.4048 | 0.419 | 7.397 | 1.591 |
+ | 1.3878 | 3.0 | 471 | 1.6456 | 0.421 | 0.3666 | 0.4605 | 0.421 | 6.147 | 1.473 |
+ | 1.1967 | 4.0 | 628 | 1.7054 | 0.42 | 0.3790 | 0.3598 | 0.42 | 5.874 | 1.44 |
+ | 1.1002 | 5.0 | 785 | 1.7713 | 0.414 | 0.3896 | 0.3701 | 0.414 | 5.647 | 1.419 |
+ | 0.9412 | 6.0 | 942 | 1.8481 | 0.425 | 0.4068 | 0.4371 | 0.425 | 5.314 | 1.37 |
+ | 0.8737 | 7.0 | 1099 | 1.9534 | 0.407 | 0.4007 | 0.4025 | 0.407 | 5.141 | 1.375 |
+ | 0.757 | 8.0 | 1256 | 2.0153 | 0.401 | 0.3932 | 0.3918 | 0.401 | 5.227 | 1.385 |
+ | 0.6973 | 9.0 | 1413 | 2.0556 | 0.404 | 0.3979 | 0.4004 | 0.404 | 5.176 | 1.376 |
+ | 0.6573 | 10.0 | 1570 | 2.0672 | 0.408 | 0.4008 | 0.4003 | 0.408 | 5.179 | 1.373 |


  ### Framework versions

- - Transformers 4.44.2
- - Pytorch 2.5.0+cu121
- - Datasets 3.1.0
- - Tokenizers 0.19.1
+ - Transformers 4.46.3
+ - Pytorch 2.5.1+cu121
+ - Datasets 3.2.0
+ - Tokenizers 0.20.3
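For readers who want to reproduce a comparable run, the sketch below maps the hyperparameters and metric columns listed in the updated card onto a `transformers` Trainer setup. It is an illustration only, not the script behind this commit: the dataset placeholders (`train_ds`, `eval_ds`), the `output_dir`, the `"weighted"` metric averaging, and the omitted learning rate are assumptions.

```python
import numpy as np
from sklearn.metrics import (accuracy_score, f1_score, mean_absolute_error,
                             mean_squared_error, precision_score, recall_score)
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

def compute_metrics(eval_pred):
    # Accuracy/F1/precision/recall as in the card; MSE/MAE treat the ten class
    # indices as an ordinal scale. The "weighted" averaging is an assumption.
    logits, labels = eval_pred.predictions, eval_pred.label_ids
    preds = np.argmax(logits, axis=-1)
    return {
        "accuracy": accuracy_score(labels, preds),
        "f1": f1_score(labels, preds, average="weighted"),
        "precision": precision_score(labels, preds, average="weighted"),
        "recall": recall_score(labels, preds, average="weighted"),
        "mse": mean_squared_error(labels, preds),
        "mae": mean_absolute_error(labels, preds),
    }

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=10)

args = TrainingArguments(
    output_dir="results",            # assumed from the "# results" card title
    per_device_train_batch_size=32,  # train_batch_size: 32
    per_device_eval_batch_size=32,   # eval_batch_size: 32
    seed=42,
    optim="adamw_torch",             # optimizer: adamw_torch with default betas/epsilon
    lr_scheduler_type="linear",
    num_train_epochs=10,
    fp16=True,                       # mixed_precision_training: Native AMP (requires a GPU)
    eval_strategy="epoch",
    logging_strategy="epoch",
    # learning_rate is not visible in this diff, so the Trainer default applies here.
)

# Placeholders: the dataset behind this run is not identified in the card.
train_ds = eval_ds = None

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
# trainer.train()  # uncomment once real tokenized datasets are supplied
```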
config.json CHANGED
@@ -1,25 +1,51 @@
  {
- "_name_or_path": "distilbert-base-uncased",
- "activation": "gelu",
+ "_name_or_path": "bert-base-uncased",
  "architectures": [
- "DistilBertForSequenceClassification"
+ "BertForSequenceClassification"
  ],
- "attention_dropout": 0,
- "dim": 768,
- "dropout": 0.1,
- "hidden_dim": 3072,
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+   "0": "LABEL_0",
+   "1": "LABEL_1",
+   "2": "LABEL_2",
+   "3": "LABEL_3",
+   "4": "LABEL_4",
+   "5": "LABEL_5",
+   "6": "LABEL_6",
+   "7": "LABEL_7",
+   "8": "LABEL_8",
+   "9": "LABEL_9"
+ },
  "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+   "LABEL_0": 0,
+   "LABEL_1": 1,
+   "LABEL_2": 2,
+   "LABEL_3": 3,
+   "LABEL_4": 4,
+   "LABEL_5": 5,
+   "LABEL_6": 6,
+   "LABEL_7": 7,
+   "LABEL_8": 8,
+   "LABEL_9": 9
+ },
+ "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
- "model_type": "distilbert",
- "n_heads": 12,
- "n_layers": 6,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
  "pad_token_id": 0,
+ "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
- "qa_dropout": 0.1,
- "seq_classif_dropout": 0.2,
- "sinusoidal_pos_embds": false,
- "tie_weights_": true,
  "torch_dtype": "float32",
- "transformers_version": "4.44.2",
+ "transformers_version": "4.46.3",
+ "type_vocab_size": 2,
+ "use_cache": true,
  "vocab_size": 30522
  }
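The updated config describes a 10-way `BertForSequenceClassification` whose classes are still the generic `LABEL_0`…`LABEL_9` (no task-specific `id2label` names). Below is a minimal inference sketch against such a checkpoint; `model_id` is a placeholder, not a value taken from this commit.

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "<this model repo>"  # placeholder: substitute the repo id or a local checkout path

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("example text to classify", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(dim=-1).item()
print(model.config.id2label[pred])  # prints a generic label such as "LABEL_4"
```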
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9118ca147ab9269bedf71ff821d0d2c37eaae6297df4ffec7d7b3fbd2f5b2ffc
- size 267832560
+ oid sha256:bb22add8717267db6ae44f80a22279062a6ec4439e3897548f553597633c87fc
+ size 437983256
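The new size roughly matches the parameter counts of the two backbones: 437,983,256 bytes at 4 bytes per float32 weight is about 109.5M parameters (bert-base-uncased plus a 10-way classification head), whereas the previous 267,832,560 bytes corresponds to about 67M parameters for the distilbert-base-uncased checkpoint.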
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d50397049eb65ce8bae6bf364635057828e3d21e1d7cd3fbad437deb89cb76f0
- size 5112
+ oid sha256:8c2530b4b26f976a6c549f19eb5ada7a0fd13969a722ffe1c41476f0f80de978
+ size 5240