MariaFGI committed · Commit 4f864e4 (verified) · 1 Parent(s): 33fa32a

Training in progress, step 189
README.md CHANGED
@@ -1,17 +1,17 @@
 ---
-base_model: mistralai/Mistral-7B-v0.3
+base_model: gpt2
 library_name: transformers
 model_name: fine_tune_e2e
 tags:
 - generated_from_trainer
-- trl
 - sft
+- trl
 licence: license
 ---
 
 # Model Card for fine_tune_e2e
 
-This model is a fine-tuned version of [mistralai/Mistral-7B-v0.3](https://huggingface.co/mistralai/Mistral-7B-v0.3).
+This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
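The card's base model switches from mistralai/Mistral-7B-v0.3 to gpt2, with the trained weights shipped as a PEFT adapter rather than a full checkpoint. A minimal sketch of loading that adapter; the repo id "MariaFGI/fine_tune_e2e" is a placeholder assumption, not confirmed by this diff:

```python
# Sketch: attach the LoRA adapter from this repo to the gpt2 base model.
# "MariaFGI/fine_tune_e2e" is a hypothetical repo id for illustration.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("gpt2")
model = PeftModel.from_pretrained(base, "MariaFGI/fine_tune_e2e")
tokenizer = AutoTokenizer.from_pretrained("gpt2")

inputs = tokenizer("Hello, world", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```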
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "mistralai/Mistral-7B-v0.3",
+  "base_model_name_or_path": "gpt2",
   "bias": "none",
   "corda_config": null,
   "eva_config": null,
@@ -15,18 +15,17 @@
   "loftq_config": {},
   "lora_alpha": 16,
   "lora_bias": false,
-  "lora_dropout": 0.05,
+  "lora_dropout": 0.0,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
   "qalora_group_size": 16,
-  "r": 4,
+  "r": 8,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "v_proj"
+    "c_attn"
   ],
   "target_parameters": null,
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9bf34fde6325e2ba35addc42a9f03f5d4b1b2d2cd81085c3308703464ff94435
-size 6832600
+oid sha256:9c3a69c79de0c4938e9b6c03e33590efccfac2dea2b5068274b2cba0826b8b46
+size 1182680
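The adapter shrinks from ~6.8 MB to ~1.2 MB, consistent with a rank-8 LoRA over the c_attn layer in each of gpt2's twelve blocks. A back-of-the-envelope check, assuming fp32 storage (the diff records only the file size):

```python
# Rough size check: gpt2 has 12 blocks, hidden size 768, and a c_attn
# projection of width 3 * 768 = 2304; each LoRA pair is A (r x in) + B (out x r).
hidden, out, r, layers = 768, 2304, 8, 12
params = layers * (r * hidden + out * r)
print(params)      # 294912 trainable parameters
print(params * 4)  # 1179648 bytes, close to the 1182680-byte file (header overhead)
```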
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json CHANGED
@@ -1,23 +1,5 @@
 {
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
 }
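The nested Mistral token definitions collapse to plain strings because gpt2 reuses a single <|endoftext|> token for bos, eos, and unk. One practical consequence, sketched below, is that gpt2 defines no pad token; assigning pad_token = eos_token is a common workaround for batched SFT (an assumption here, not shown in this commit):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
print(tok.bos_token, tok.eos_token, tok.unk_token)  # all "<|endoftext|>"
tok.pad_token = tok.eos_token  # common workaround; not part of this commit
```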
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:34a17f31cdbc6029c272d8a023e589524ede2109a16cd835dfc7c15858bb5ca8
-size 6225
+oid sha256:194a9386546a5476c11b86d7605eea2c8e41a2001f4d4b6aa2e145a367827a0c
+size 6161
vocab.json ADDED
The diff for this file is too large to render. See raw diff
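The added merges.txt and vocab.json are the byte-level BPE merge rules and vocabulary that gpt2's tokenizer ships with; Mistral's SentencePiece tokenizer did not need them, which is why they appear only now. A sketch of loading the two files directly, roughly equivalent to AutoTokenizer.from_pretrained("gpt2"):

```python
# Load the byte-level BPE tokenizer from the two files added in this commit.
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer(vocab_file="vocab.json", merges_file="merges.txt")
print(tok.tokenize("Hello world"))  # ['Hello', 'Ġworld']
```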