SykoSLM committed on
Commit 3dc2846 · verified · 1 Parent(s): f7b2977

Upload folder using huggingface_hub

__pycache__/modeling_sykoslm.cpython-312.pyc ADDED
Binary file (12.4 kB).
 
abstract_head.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b1852603d3043c5282f6d98a44a04c8fbdd3e88b9ecab55c3c729d3f526fed3
+ size 1846896
config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "architectures": [
+     "SykoLLM_CMN"
+   ],
+   "model_type": "sykollm",
+   "vocab_size": 32000,
+   "d_model": 768,
+   "n_heads": 6,
+   "n_layers": 24,
+   "chunk_size": 128,
+   "intermediate_size": 3072,
+   "num_memory_tokens": 16,
+   "context_size": 1024,
+   "overlap_size": 16,
+   "code_overlap_size": 64,
+   "abstract_head_hidden": 256,
+   "abstract_head_layers": 2,
+   "token_loss_weight": 0.7,
+   "abstract_loss_weight": 0.3,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "pad_token_id": 0
+ }
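For reference, these settings give a per-head dimension of d_model / n_heads = 768 / 6 = 128, which is also the rotary-embedding dimension that the model code below derives as config.d_model // config.n_heads.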
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d838ef9c350edea93abf5b24735c638a5b69ad7dd23531b5e2e4bdc9764b51c8
+ size 884045808
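Back-of-envelope check: with vocab_size 32000, d_model 768, and 24 layers (each contributing roughly 7.1M parameters from the fused QKV/output projections and the 768→3072→768 MLP), the model totals about 221M parameters. At 4 bytes per parameter that is roughly 884 MB, matching the checkpoint size above, so the weights appear to be stored in float32.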
modeling_sykoslm.py ADDED
@@ -0,0 +1,137 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import PreTrainedModel, PretrainedConfig
+
+ class SykoSLMConfig(PretrainedConfig):
+     model_type = "sykollm"
+     def __init__(self, vocab_size=12000, d_model=512, n_layers=18, n_heads=4,
+                  num_memory_tokens=16, chunk_size=128, context_size=1024,
+                  overlap_size=16, code_overlap_size=64, abstract_head_hidden=256,
+                  abstract_head_layers=2, intermediate_size=2048, **kwargs):
+         super().__init__(**kwargs)
+         self.vocab_size = vocab_size
+         self.d_model = d_model
+         self.n_layers = n_layers
+         self.n_heads = n_heads
+         self.num_memory_tokens = num_memory_tokens
+         self.chunk_size = chunk_size
+         self.context_size = context_size
+         self.overlap_size = overlap_size
+         self.code_overlap_size = code_overlap_size
+         self.abstract_head_hidden = abstract_head_hidden
+         self.abstract_head_layers = abstract_head_layers
+         self.intermediate_size = intermediate_size
+
+ def apply_rotary_emb(x, cos, sin):
+     cos, sin = cos.to(x.dtype), sin.to(x.dtype)
+     d = x.shape[-1]
+     x1, x2 = x[..., :d//2], x[..., d//2:]
+     return (x * cos) + (torch.cat([-x2, x1], dim=-1) * sin)
+
+ class SykoRoPE(nn.Module):
+     def __init__(self, dim, base=10000.0):
+         super().__init__()
+         self.dim, self.base = dim, base
+     def forward(self, positions):
+         inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, device=positions.device).float() / self.dim))
+         freqs = torch.outer(positions.float(), inv_freq)
+         emb = torch.cat((freqs, freqs), dim=-1)
+         return emb.cos()[None, None, :, :], emb.sin()[None, None, :, :]
+
+ class SykoAttention(nn.Module):
+     def __init__(self, d_model, n_heads):
+         super().__init__()
+         self.n_heads, self.head_dim = n_heads, d_model // n_heads
+         self.qkv = nn.Linear(d_model, d_model * 3, bias=False)
+         self.out = nn.Linear(d_model, d_model, bias=False)
+     def forward(self, x, cos, sin):
+         B, L, D = x.shape
+         qkv = self.qkv(x).reshape(B, L, 3, self.n_heads, self.head_dim).permute(2, 0, 3, 1, 4)
+         q, k, v = qkv[0], qkv[1], qkv[2]
+         q, k = apply_rotary_emb(q, cos, sin), apply_rotary_emb(k, cos, sin)
+         out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
+         return self.out(out.transpose(1, 2).reshape(B, L, D))
+
+ class SykoTransformerLayer(nn.Module):
+     def __init__(self, d_model, n_heads, intermediate_size):
+         super().__init__()
+         self.norm1 = nn.LayerNorm(d_model)
+         self.attn = SykoAttention(d_model, n_heads)
+         self.norm2 = nn.LayerNorm(d_model)
+         self.mlp = nn.Sequential(
+             nn.Linear(d_model, intermediate_size), nn.GELU(),
+             nn.Dropout(0.0),
+             nn.Linear(intermediate_size, d_model)
+         )
+     def forward(self, x, cos, sin):
+         x = x + self.attn(self.norm1(x), cos, sin)
+         return x + self.mlp(self.norm2(x))
+
+ class SykoMemoryGate(nn.Module):
+     def __init__(self, d_model):
+         super().__init__()
+         self.forget_linear = nn.Linear(d_model * 2, d_model)
+         self.update_linear = nn.Linear(d_model, d_model)
+         self.norm = nn.LayerNorm(d_model)
+     def forward(self, current_context, prev_memory):
+         combined = torch.cat([current_context, prev_memory], dim=-1)
+         forget_ratio = torch.sigmoid(self.forget_linear(combined))
+         new_candidate = torch.tanh(self.update_linear(current_context))
+         return self.norm((forget_ratio * prev_memory) + ((1 - forget_ratio) * new_candidate))
+
+ class SykoSLM(PreTrainedModel):
+     config_class = SykoSLMConfig
+     def __init__(self, config):
+         super().__init__(config)
+         self.mem_tokens = config.num_memory_tokens
+         self.d_model = config.d_model
+         pad_idx = getattr(config, "pad_token_id", 0) or 0
+         self.embedding = nn.Embedding(config.vocab_size, config.d_model, padding_idx=pad_idx)
+         self.mem_pos_emb = nn.Embedding(config.num_memory_tokens, config.d_model)
+         self.rope = SykoRoPE(config.d_model // config.n_heads)
+         self.layers = nn.ModuleList([
+             SykoTransformerLayer(config.d_model, config.n_heads, config.intermediate_size)
+             for _ in range(config.n_layers)
+         ])
+         self.final_norm = nn.LayerNorm(config.d_model)
+         self.memory_gate = SykoMemoryGate(config.d_model)
+         self.fc_out = nn.Linear(config.d_model, config.vocab_size)
+
+     def forward(self, input_ids, prev_memory=None, chunk_start_idx=0, **kwargs):
+         B = input_ids.size(0)
+         if prev_memory is None:
+             prev_memory = torch.zeros(B, self.mem_tokens, self.d_model, device=input_ids.device)
+         x = self.embedding(input_ids)
+         mem_idx = torch.arange(self.mem_tokens, device=input_ids.device)
+         memory_with_pos = prev_memory + self.mem_pos_emb(mem_idx).unsqueeze(0)
+         x_with_memory = torch.cat([memory_with_pos, x], dim=1)
+         mem_pos = torch.zeros(self.mem_tokens, dtype=torch.long, device=input_ids.device)
+         word_pos = torch.arange(chunk_start_idx, chunk_start_idx + x.size(1), device=input_ids.device)
+         cos, sin = self.rope(torch.cat([mem_pos, word_pos]))
+         for layer in self.layers:
+             x_with_memory = layer(x_with_memory, cos, sin)
+         x_with_memory = self.final_norm(x_with_memory)
+         memory_output = x_with_memory[:, :self.mem_tokens, :]
+         token_outputs = x_with_memory[:, self.mem_tokens:, :]
+         return self.fc_out(token_outputs), self.memory_gate(memory_output, prev_memory)
+
+     def generate_text(self, input_ids, max_new_tokens=100, temperature=0.8, top_k=50):
+         self.eval()
+         device = input_ids.device
+         prev_memory = torch.zeros(1, self.mem_tokens, self.d_model, device=device)
+         generated = input_ids.clone()
+         with torch.no_grad():
+             for _ in range(max_new_tokens):
+                 chunk = generated[:, -self.config.chunk_size:]
+                 logits, prev_memory = self.forward(chunk, prev_memory)
+                 next_logits = logits[:, -1, :] / temperature
+                 top_k_vals, top_k_idx = torch.topk(next_logits, k=min(top_k, next_logits.size(-1)))
+                 filtered = torch.full_like(next_logits, float("-inf"))
+                 filtered.scatter_(1, top_k_idx, top_k_vals)
+                 next_token = torch.multinomial(torch.softmax(filtered, dim=-1), 1)
+                 generated = torch.cat([generated, next_token], dim=1)
+                 eos = getattr(self.config, "eos_token_id", None)
+                 if eos and next_token.item() == eos:
+                     break
+         return generated
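Each forward pass returns next-token logits plus an updated bank of num_memory_tokens memory vectors produced by SykoMemoryGate, and generate_text re-feeds the most recent chunk_size tokens together with that memory on every step. A minimal usage sketch follows; it assumes the files from this commit sit in a local directory named ./SykoSLM, and the directory name and prompt are illustrative, not part of the commit:

import torch
from transformers import PreTrainedTokenizerFast
from modeling_sykoslm import SykoSLM

# Hypothetical local path to a checkout of this repo.
model = SykoSLM.from_pretrained("./SykoSLM")

# Wrap the committed tokenizer.json in a fast tokenizer, wiring up the
# special tokens declared in tokenizer_config.json.
tokenizer = PreTrainedTokenizerFast(
    tokenizer_file="./SykoSLM/tokenizer.json",
    bos_token="<bos>", eos_token="<eos>",
    pad_token="<pad>", unk_token="<unk>",
)

prompt = "Once upon a time"  # illustrative prompt
input_ids = torch.tensor([tokenizer.encode(prompt)], dtype=torch.long)

# generate_text handles chunking and memory carry-over internally.
output_ids = model.generate_text(input_ids, max_new_tokens=50, temperature=0.8, top_k=50)
print(tokenizer.decode(output_ids[0].tolist(), skip_special_tokens=True))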
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "backend": "tokenizers",
+   "bos_token": "<bos>",
+   "eos_token": "<eos>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "tokenizer_class": "TokenizersBackend",
+   "unk_token": "<unk>"
+ }
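The model_max_length value is the transformers placeholder int(1e30), meaning no explicit maximum length was recorded for the tokenizer; in practice the sequence length the model processes per step is governed by chunk_size and context_size in config.json.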