alextripplet committed · verified
Commit ef3b1ae · 1 Parent(s): 27bc870

Upload config.json with huggingface_hub

Files changed (1)
  1. config.json +63 -83
config.json CHANGED
@@ -1,83 +1,63 @@
- {
-   "architectures": [
-     "Qwen3_5ForCausalLM"
-   ],
-   "attention_bias": false,
-   "attention_dropout": 0.0,
-   "attn_output_gate": true,
-   "bos_token_id": null,
-   "dtype": "bfloat16",
-   "eos_token_id": 248044,
-   "full_attention_interval": 4,
-   "head_dim": 256,
-   "hidden_act": "silu",
-   "hidden_size": 4096,
-   "initializer_range": 0.02,
-   "intermediate_size": 12288,
-   "layer_types": [
-     "linear_attention",
-     "linear_attention",
-     "linear_attention",
-     "full_attention",
-     "linear_attention",
-     "linear_attention",
-     "linear_attention",
-     "full_attention",
-     "linear_attention",
-     "linear_attention",
-     "linear_attention",
-     "full_attention",
-     "linear_attention",
-     "linear_attention",
-     "linear_attention",
-     "full_attention",
-     "linear_attention",
-     "linear_attention",
-     "linear_attention",
-     "full_attention",
-     "linear_attention",
-     "linear_attention",
-     "linear_attention",
-     "full_attention",
-     "linear_attention",
-     "linear_attention",
-     "linear_attention",
-     "full_attention",
-     "linear_attention",
-     "linear_attention",
-     "linear_attention",
-     "full_attention"
-   ],
-   "linear_conv_kernel_dim": 4,
-   "linear_key_head_dim": 128,
-   "linear_num_key_heads": 16,
-   "linear_num_value_heads": 32,
-   "linear_value_head_dim": 128,
-   "mamba_ssm_dtype": "float32",
-   "max_position_embeddings": 262144,
-   "mlp_only_layers": [],
-   "model_type": "qwen3_5_text",
-   "mtp_num_hidden_layers": 1,
-   "mtp_use_dedicated_embeddings": false,
-   "num_attention_heads": 16,
-   "num_hidden_layers": 32,
-   "num_key_value_heads": 4,
-   "pad_token_id": null,
-   "partial_rotary_factor": 0.25,
-   "rms_norm_eps": 1e-06,
-   "rope_parameters": {
-     "mrope_interleaved": true,
-     "mrope_section": [
-       11,
-       11,
-       10
-     ],
-     "partial_rotary_factor": 0.25,
-     "rope_theta": 10000000,
-     "rope_type": "default"
-   },
-   "tie_word_embeddings": false,
-   "transformers_version": "5.4.0",
-   "use_cache": true,
-   "vocab_size": 248320
- }
 
+ ---
+ license: apache-2.0
+ language:
+ - en
+ - zh
+ - ko
+ - ja
+ - fr
+ - es
+ - de
+ - it
+ - ru
+ - ar
+ - multilingual
+ pipeline_tag: text-generation
+ tags:
+ - chat
+ - suzhou
+ - merged
+ - reasoning
+ - tool-use
+ - agent
+ library_name: transformers
+ base_model:
+ - tripplet-research/suzhou3.1
+ - Qwen/Qwen2.5-3B-Instruct
+ ---
+
+ # Suzhou 3.2
+
+ A 12-billion-parameter instruction-tuned language model by **Triplet Research**. Suzhou 3.2 is a weighted merge of Suzhou 3.1 and Qwen2.5-3B-Instruct, designed to improve reasoning and math capabilities.
+
+ ## Merge Details
+
+ - **Method**: Weighted blending (70% Suzhou 3.1 + 30% Qwen2.5-3B); see the sketch after this list
+ - **Model A**: Suzhou 3.1 - strong agent/tool-use and reasoning
+ - **Model B**: Qwen2.5-3B-Instruct - math reasoning and general knowledge
+ - **Target**: 12B parameters
+
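+ The exact merge recipe is not published here; the following is a minimal sketch of weighted state-dict blending, assuming both parents expose identically shaped tensors and that the 0.7/0.3 ratio above is applied uniformly. The repo ids come from the `base_model` list; the output directory is hypothetical.
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM
+
+ # Load both parents in full precision; merging needs no gradients.
+ a = AutoModelForCausalLM.from_pretrained("tripplet-research/suzhou3.1", torch_dtype=torch.float32)
+ b = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-3B-Instruct", torch_dtype=torch.float32)
+
+ alpha = 0.7  # 70% Suzhou 3.1 + 30% Qwen2.5-3B (assumed uniform across tensors)
+ merged = a.state_dict()
+ with torch.no_grad():
+     for name, tensor in b.state_dict().items():
+         # Blend only tensors present in both models with matching shapes.
+         if name in merged and merged[name].shape == tensor.shape:
+             merged[name] = alpha * merged[name] + (1.0 - alpha) * tensor
+
+ a.load_state_dict(merged)
+ a.save_pretrained("suzhou-3.2-merged")  # hypothetical output directory
+ ```
+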
+ ## Key Features
+
+ - **12B parameters**
+ - **262K context window**
+ - Strong **reasoning** and **chain-of-thought** capabilities
+ - **Tool calling** and **agent** support
+ - **Multilingual** support (29+ languages)
+ - Mixed attention architecture (linear + full attention layers; see the sketch below)
+
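+ The removed config above makes the mixing pattern concrete: with `full_attention_interval: 4`, every fourth of the 32 layers uses full attention and the other three use linear attention. An illustrative one-liner that reproduces the old config's `layer_types` list:
+
+ ```python
+ # Rebuilds the 32-entry layer_types list: three linear-attention layers,
+ # then one full-attention layer, repeated eight times.
+ layer_types = [
+     "full_attention" if (i + 1) % 4 == 0 else "linear_attention"
+     for i in range(32)
+ ]
+ assert layer_types.count("full_attention") == 8
+ ```
+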
+ ## Architecture
+
+ - Type: Causal Language Model
+ - Architecture: Qwen3.5 Text (`qwen3_5_text`)
+ - Layers: 32
+ - Context length: 262,144 tokens
+ - Parameters: 12B
+
+ ## Quickstart
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # "auto" keeps the checkpoint's native dtype (bfloat16 here); device_map="auto"
+ # requires the accelerate package and spreads weights across available devices.
+ model = AutoModelForCausalLM.from_pretrained(
+     "Triplet-Research/suzhou-3.2",
+     torch_dtype="auto",
+     device_map="auto",
+ )
+ tokenizer = AutoTokenizer.from_pretrained("Triplet-Research/suzhou-3.2")
+ ```
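+
+ A minimal generation example, assuming the tokenizer ships a chat template (the prompt text is illustrative):
+
+ ```python
+ messages = [{"role": "user", "content": "Explain linear attention in one paragraph."}]
+ inputs = tokenizer.apply_chat_template(
+     messages, add_generation_prompt=True, return_tensors="pt"
+ ).to(model.device)
+
+ outputs = model.generate(inputs, max_new_tokens=256)
+ # Decode only the newly generated tokens, skipping the prompt.
+ print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
+ ```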