OGrohit committed (verified)
Commit cc3d26e · 1 parent: fb0bd21

Upload Qwen2ForCausalLM

Files changed (4):
  1. README.md +1 -0
  2. config.json +81 -40
  3. generation_config.json +13 -7
  4. model.safetensors +2 -2
README.md CHANGED
@@ -7,6 +7,7 @@ tags:
 - incident-triage
 - grpo
 - openenv
+- trl
 ---

 # LogTriageEnv SRE Agent
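Note: the only README change is the new `trl` tag. Together with the existing `grpo` tag it indicates the checkpoint was trained with TRL's GRPO trainer. As a hedged sketch only (the actual training script, reward function, and LogTriageEnv wiring are not part of this commit, and every identifier below other than the TRL and `datasets` APIs is a placeholder), a minimal GRPO run looks like this:

```python
# Minimal TRL GRPO sketch. Prompts and reward are toy placeholders;
# a real run would score completions against the LogTriageEnv environment.
from datasets import Dataset
from trl import GRPOConfig, GRPOTrainer

def triage_reward(completions, **kwargs):
    # Placeholder reward: prefer short, non-empty triage answers.
    return [1.0 if 0 < len(c) <= 200 else 0.0 for c in completions]

dataset = Dataset.from_dict({"prompt": [
    "Triage this log: OOMKilled in pod web-7f9c",
    "Triage this log: TLS handshake timeout to upstream",
]})

trainer = GRPOTrainer(
    model="Qwen/Qwen2.5-3B-Instruct",  # assumed base; config shapes match Qwen2.5-3B
    reward_funcs=triage_reward,
    args=GRPOConfig(output_dir="grpo-out", num_generations=4),
    train_dataset=dataset,
)
trainer.train()
```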
config.json CHANGED
@@ -1,40 +1,81 @@
-{
-  "architectures": [
-    "LlamaForCausalLM"
-  ],
-  "attention_bias": false,
-  "attention_dropout": 0.0,
-  "bos_token_id": 1,
-  "dtype": "float16",
-  "eos_token_id": 2,
-  "head_dim": 64,
-  "hidden_act": "silu",
-  "hidden_size": 960,
-  "initializer_range": 0.02,
-  "intermediate_size": 2560,
-  "is_llama_config": true,
-  "max_position_embeddings": 8192,
-  "mlp_bias": false,
-  "model_type": "llama",
-  "num_attention_heads": 15,
-  "num_hidden_layers": 32,
-  "num_key_value_heads": 5,
-  "pad_token_id": 2,
-  "pretraining_tp": 1,
-  "rms_norm_eps": 1e-05,
-  "rope_interleaved": false,
-  "rope_parameters": {
-    "rope_theta": 100000,
-    "rope_type": "default"
-  },
-  "tie_word_embeddings": true,
-  "transformers.js_config": {
-    "kv_cache_dtype": {
-      "fp16": "float16",
-      "q4f16": "float16"
-    }
-  },
-  "transformers_version": "5.5.4",
-  "use_cache": true,
-  "vocab_size": 49152
-}
+{
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "dtype": "float32",
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "layer_types": [
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention"
+  ],
+  "max_position_embeddings": 32768,
+  "max_window_layers": 70,
+  "model_type": "qwen2",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 36,
+  "num_key_value_heads": 2,
+  "pad_token_id": 151643,
+  "quantization_config": {
+    "_load_in_4bit": true,
+    "_load_in_8bit": false,
+    "bnb_4bit_compute_dtype": "float16",
+    "bnb_4bit_quant_storage": "uint8",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": true,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": null,
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": true,
+  "transformers_version": "4.57.2",
+  "use_cache": false,
+  "use_sliding_window": false,
+  "vocab_size": 151936
+}
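The replacement config swaps a small Llama-architecture checkpoint (hidden_size 960, 32 layers, vocab 49152, matching the SmolLM2-360M shape) for a Qwen2 model whose dimensions (hidden_size 2048, 36 layers, 16 heads with 2 KV heads, vocab 151936) match Qwen2.5-3B, and it embeds a bitsandbytes NF4 4-bit quantization_config. Because that quantization config is serialized into config.json, from_pretrained applies it automatically; a minimal loading sketch follows, with the repo id as a placeholder since it is not shown in this commit:

```python
# Hedged loading sketch: bitsandbytes must be installed and a CUDA device
# available; the embedded quantization_config (NF4, double quantization,
# fp16 compute dtype) is picked up without passing a BitsAndBytesConfig.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "OGrohit/logtriage-sre-agent"  # placeholder repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, device_map="auto")
```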
generation_config.json CHANGED
@@ -1,7 +1,13 @@
-{
-  "_from_model_config": true,
-  "bos_token_id": 1,
-  "eos_token_id": 2,
-  "pad_token_id": 2,
-  "transformers_version": "5.5.4"
-}
+{
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "pad_token_id": 151643,
+  "repetition_penalty": 1.05,
+  "temperature": 0.7,
+  "top_k": 20,
+  "top_p": 0.8,
+  "transformers_version": "4.57.2"
+}
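The new generation defaults turn on sampling with Qwen's usual recommended settings (temperature 0.7, top_p 0.8, top_k 20, repetition_penalty 1.05) and register both Qwen2 terminators as EOS: 151645 (<|im_end|>) and 151643 (<|endoftext|>). A short usage sketch, reusing `model` and `tokenizer` from the loading example above with an illustrative prompt:

```python
# Sketch: model.generate() reads do_sample/temperature/top_p/top_k from the
# saved generation_config.json, so no sampling arguments are passed here.
messages = [{"role": "user", "content": "Triage: ERROR db-pool exhausted, 500s spiking"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

out = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(out[0, inputs.shape[-1]:], skip_special_tokens=True))
```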
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:65e89db9aa07488d31f7ad5e52864fc0637a7cbba105aa39e471237dda6a5fd9
-size 723674624
+oid sha256:723fcfe80236157dfd789383900ac42d878da5ad4a8fa1f19dd8f07185beda94
+size 2677445249
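The safetensors file itself is stored via Git LFS; this pointer records the SHA-256 (oid) and byte size of the real weights, which grew from roughly 724 MB to roughly 2.7 GB with the new checkpoint. Those two fields make a download verifiable with the standard library alone; a sketch:

```python
# Verify a downloaded model.safetensors against the LFS pointer in this commit.
import hashlib

EXPECTED_OID = "723fcfe80236157dfd789383900ac42d878da5ad4a8fa1f19dd8f07185beda94"
EXPECTED_SIZE = 2677445249

h, size = hashlib.sha256(), 0
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size} != {EXPECTED_SIZE}"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")
```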