| """ |
| MAC (Memory-as-Context) 结构集成到 Qwen 的详细实现 |
| |
| === MAC 结构核心原理 === |
| |
| 1. 将长序列分成多个 segment(如每 128 个 token 一个 segment) |
| 2. 在每个 segment 的【开头】插入 longterm_mem_tokens(如 16 个) |
| 3. 这些 memory tokens 会参与 attention 计算 |
| 4. 使用 NeuralMemory 模块来动态更新这些 memory tokens 的内容 |
| |
| 原始序列: [t1, t2, t3, ..., t128, t129, ..., t256, ...] |
| ↓ |
| MAC 序列: [M1..M16, t1...t128, M1..M16, t129...t256, ...] |
| ↑ ↑ |
| memory tokens memory tokens |
| |
| === Qwen2 架构 === |
| |
| Qwen2DecoderLayer: |
| ├── input_layernorm (RMSNorm) |
| ├── self_attn (Qwen2Attention/Qwen2FlashAttention2) |
| │ ├── q_proj, k_proj, v_proj |
| │ ├── RoPE (rotary positional embedding) |
| │ └── o_proj |
| ├── post_attention_layernorm (RMSNorm) |
| └── mlp (Qwen2MLP: gate_proj, up_proj, down_proj with SiLU) |
| |
| 我们需要在特定层添加 NeuralMemory 模块 |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| from torch import Tensor |
| from typing import Optional, Tuple, List, Dict, Any |
| from copy import deepcopy |
| from functools import partial |
|
|
| from einops import rearrange, repeat, pack, unpack |
|
|
| |
| from titans_pytorch import NeuralMemory, MemoryMLP |
| from titans_pytorch.neural_memory import NeuralMemState |
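

# Illustrative sketch (not used by the model below): the segment/memory-token
# interleaving described in the module docstring, written out with einops.
# All shapes and sizes here are made up for demonstration.
def _mac_interleave_demo():
    batch, seq_len, dim = 2, 256, 8
    segment_len, num_mem = 128, 16

    tokens = torch.randn(batch, seq_len, dim)
    mem = torch.randn(num_mem, dim)  # the same memory tokens precede every segment

    # [b, (s n), d] -> [b, s, n, d]: one row per segment
    segs = rearrange(tokens, 'b (s n) d -> b s n d', n=segment_len)
    # broadcast the memory tokens in front of every segment, then flatten back
    mem_b = repeat(mem, 'n d -> b s n d', b=batch, s=segs.shape[1])
    mac = rearrange(torch.cat([mem_b, segs], dim=2), 'b s n d -> b (s n) d')

    # 256 tokens -> 2 segments -> 2 * (16 + 128) = 288 positions
    assert mac.shape == (batch, 288, dim)
    return mac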


def exists(v):
    return v is not None


def default(v, d):
    return v if exists(v) else d


def divisible_by(num, den):
    return (num % den) == 0


def round_up_multiple(seq, mult):
    return ((seq + mult - 1) // mult) * mult
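

# Small guard (an addition, not part of the original design): forward() below
# assumes each decoder layer exposes the Qwen2DecoderLayer attributes sketched
# in the module docstring. Checking up front turns a transformers-version
# mismatch into a clear error instead of a mid-forward AttributeError.
def assert_qwen2_layer_layout(qwen_model):
    layer = qwen_model.model.layers[0]
    for attr in ('input_layernorm', 'self_attn', 'post_attention_layernorm', 'mlp'):
        assert hasattr(layer, attr), f'unexpected decoder layer layout: missing {attr}'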


class QwenMACTransformer(nn.Module):
    """
    Complete implementation of the MAC structure applied to a Qwen model.

    Architecture:

        Input IDs
            │
            ▼
        ┌─────────────────┐
        │   Token Embed   │
        └────────┬────────┘
                 │
                 ▼
        ┌─────────────────────────────────────────┐
        │ Insert memory tokens at the head of     │
        │ every segment:                          │
        │   [M1..Mn, t1..t_seg, M1..Mn, ...]      │
        └────────┬────────────────────────────────┘
                 │
                 ▼
        ╔═════════════════════════════════════════╗
        ║  Qwen Decoder Layer 1                   ║
        ║  ┌────────────────────────────────┐     ║
        ║  │ RMSNorm → Self-Attention → Add │     ║
        ║  └────────────────────────────────┘     ║
        ║  ┌────────────────────────────────┐     ║
        ║  │ RMSNorm → MLP → Add            │     ║
        ║  └────────────────────────────────┘     ║
        ╚═════════════════════════════════════════╝
                 │
                 ▼
        ╔═════════════════════════════════════════╗
        ║  Qwen Decoder Layer 2 (with Memory)     ║
        ║  ┌────────────────────────────────┐     ║
        ║  │ RMSNorm → Self-Attention → Add │     ║
        ║  └────────────────────────────────┘     ║
        ║  ┌──────────────────────────────────┐   ║
        ║  │ ★ NeuralMemory augmentation ★    │   ║
        ║  │ retrieved = mem(hidden_states)   │   ║
        ║  │ hidden += gate * retrieved       │   ║
        ║  └──────────────────────────────────┘   ║
        ║  ┌────────────────────────────────┐     ║
        ║  │ RMSNorm → MLP → Add            │     ║
        ║  └────────────────────────────────┘     ║
        ╚═════════════════════════════════════════╝
                 │
                 ▼
           ... more layers ...
                 │
                 ▼
        ┌─────────────────┐
        │  Final RMSNorm  │
        └────────┬────────┘
                 │
                 ▼
        ┌─────────────────┐
        │     LM Head     │
        └────────┬────────┘
                 │
                 ▼
             Logits
    """

    def __init__(
        self,
        qwen_model,
        # MAC segmentation
        segment_len: int = 128,
        num_longterm_mem_tokens: int = 16,
        num_persist_mem_tokens: int = 4,
        # NeuralMemory placement and hyperparameters
        neural_memory_layers: Tuple[int, ...] = (2, 4, 6),
        memory_chunk_size: int = 64,
        memory_batch_size: int = 128,
        memory_depth: int = 2,
        # attention-side hyperparameters for the memory modules
        dim_head: int = 64,
        num_heads: int = None,
        use_momentum: bool = True,
        gate_memory_output: bool = False,
    ):
        super().__init__()

        self.qwen = qwen_model
        self.config = qwen_model.config

        self.hidden_size = self.config.hidden_size
        self.num_layers = self.config.num_hidden_layers
        num_heads = default(num_heads, self.hidden_size // dim_head)

        self.segment_len = segment_len
        self.num_longterm_mem_tokens = num_longterm_mem_tokens
        self.num_persist_mem_tokens = num_persist_mem_tokens
        self.total_segment_len = segment_len + num_longterm_mem_tokens

        # persistent memory tokens: prepended once, visible to every position
        self.persist_mem_tokens = nn.Parameter(
            torch.randn(num_persist_mem_tokens, self.hidden_size) * 0.02
        )

        # long-term memory tokens: the same learned tokens prefix every segment
        self.longterm_mem_tokens = nn.Parameter(
            torch.randn(num_longterm_mem_tokens, self.hidden_size) * 0.02
        )

        # which decoder layers get a NeuralMemory module
        self.neural_memory_layers = neural_memory_layers
        self.gate_memory_output = gate_memory_output

        self.neural_memories = nn.ModuleDict()
        self.memory_projections = nn.ModuleDict()  # reserved; not used below
        self.memory_gates = nn.ModuleDict()

        # template for the per-head memory model; deep-copied per layer
        memory_model_template = MemoryMLP(
            dim=dim_head,
            depth=memory_depth,
            expansion_factor=2.0
        )

        for layer_idx in neural_memory_layers:
            layer_key = str(layer_idx)

            self.neural_memories[layer_key] = NeuralMemory(
                dim=self.hidden_size,
                chunk_size=memory_chunk_size,
                batch_size=memory_batch_size,
                dim_head=dim_head,
                heads=num_heads,
                model=deepcopy(memory_model_template),
                momentum=use_momentum,
                momentum_order=1,
                qk_rmsnorm=True,
                pre_rmsnorm=True,
                default_step_transform_max_lr=0.1,
            )

            # per-layer gate deciding how much retrieved memory to inject
            self.memory_gates[layer_key] = nn.Sequential(
                nn.Linear(self.hidden_size, self.hidden_size),
                nn.Sigmoid()
            )

        print("[QwenMAC] initialized:")
        print(f"  - hidden size: {self.hidden_size}")
        print(f"  - num layers: {self.num_layers}")
        print(f"  - segment length: {segment_len}")
        print(f"  - longterm memory tokens: {num_longterm_mem_tokens}")
        print(f"  - persistent memory tokens: {num_persist_mem_tokens}")
        print(f"  - neural memory layers: {neural_memory_layers}")

    def _insert_memory_tokens(
        self,
        hidden_states: Tensor,
        batch_size: int,
        seq_len: int,
    ) -> Tuple[Tensor, int]:
        """
        Insert memory tokens into the sequence.

        Input:  [batch, seq_len, hidden]
        Output: [batch, new_seq_len, hidden]

        Processing steps, starting from
        original: [t1, t2, ..., t128, t129, ..., t256]

        1. split into segments:
           Seg1: [t1, ..., t128]
           Seg2: [t129, ..., t256]

        2. prepend longterm_mem to every segment:
           Seg1: [M1, ..., M16, t1, ..., t128]
           Seg2: [M1, ..., M16, t129, ..., t256]

        3. flatten and prepend persist_mem:
           [P1, ..., P4, M1..M16, t1..t128, M1..M16, t129..t256]
        """
        segment_len = self.segment_len

        num_segments = (seq_len + segment_len - 1) // segment_len

        # pad the tail so the length is a whole number of segments
        padded_len = num_segments * segment_len
        if seq_len < padded_len:
            padding = padded_len - seq_len
            hidden_states = F.pad(hidden_states, (0, 0, 0, padding))

        # [b, s*n, d] -> [b, s, n, d]
        hidden_states = rearrange(
            hidden_states,
            'b (s n) d -> b s n d',
            s=num_segments,
            n=segment_len
        )

        # broadcast the learned long-term memory tokens over batch and segments
        longterm_mem = repeat(
            self.longterm_mem_tokens,
            'n d -> b s n d',
            b=batch_size,
            s=num_segments
        )

        # prepend the memory tokens to every segment
        hidden_states = torch.cat([longterm_mem, hidden_states], dim=2)

        # flatten the segments back into one sequence
        hidden_states = rearrange(hidden_states, 'b s n d -> b (s n) d')

        # prepend the persistent memory tokens once, at the very front
        persist_mem = repeat(
            self.persist_mem_tokens,
            'n d -> b n d',
            b=batch_size
        )
        hidden_states = torch.cat([persist_mem, hidden_states], dim=1)

        new_seq_len = hidden_states.shape[1]

        return hidden_states, new_seq_len

    def _remove_memory_tokens(
        self,
        hidden_states: Tensor,
        original_seq_len: int,
    ) -> Tensor:
        """
        Strip the memory tokens from the output, restoring the original
        sequence length. (`check_memory_token_roundtrip` below verifies that
        this exactly inverts `_insert_memory_tokens`.)
        """
        segment_len = self.segment_len
        num_longterm = self.num_longterm_mem_tokens
        num_persist = self.num_persist_mem_tokens
        total_segment_len = segment_len + num_longterm

        # drop the persistent memory tokens at the front
        hidden_states = hidden_states[:, num_persist:]

        num_segments = (original_seq_len + segment_len - 1) // segment_len

        # [b, s*(m+n), d] -> [b, s, m+n, d]
        hidden_states = rearrange(
            hidden_states,
            'b (s n) d -> b s n d',
            s=num_segments,
            n=total_segment_len
        )

        # drop the long-term memory tokens at the head of each segment
        hidden_states = hidden_states[:, :, num_longterm:, :]

        # flatten and trim the segment padding
        hidden_states = rearrange(hidden_states, 'b s n d -> b (s n) d')
        hidden_states = hidden_states[:, :original_seq_len, :]

        return hidden_states

    def _create_mac_attention_mask(
        self,
        seq_len_with_mem: int,
        device: torch.device,
        dtype: torch.dtype,
    ) -> Tensor:
        """
        Build a MAC-style attention mask.

        Properties of the MAC mask:
        1. Persistent memory tokens are visible to every position.
        2. Each segment is causal internally.
        3. Memory tokens may attend to earlier segments.

        This is a simplified version: it returns a plain causal mask (plus
        globally visible persistent-memory columns) in the additive convention
        HF attention expects (0 = attend, large negative = mask); a faithful
        MAC mask would further restrict cross-segment attention. Note that
        this 4D additive mask targets the eager/SDPA attention paths; the
        flash-attention path expects a 2D padding mask instead.
        """
        min_value = torch.finfo(dtype).min
        mask = torch.full(
            (seq_len_with_mem, seq_len_with_mem),
            min_value, device=device, dtype=dtype
        )
        mask = torch.triu(mask, diagonal=1)  # zeros on/below the diagonal -> causal

        # persistent memory tokens (the first columns) stay visible everywhere
        num_persist = self.num_persist_mem_tokens
        mask[:, :num_persist] = 0.0

        # [1, 1, q, k] broadcasts over batch and heads
        return mask[None, None, :, :]

    def forward(
        self,
        input_ids: Tensor,
        attention_mask: Optional[Tensor] = None,
        position_ids: Optional[Tensor] = None,
        memory_states: Optional[Dict[str, NeuralMemState]] = None,
        return_memory_states: bool = True,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Forward pass.

        Args:
            input_ids: [batch, seq_len]
            attention_mask: [batch, seq_len] (currently ignored; a MAC mask
                over the expanded sequence is built internally)
            memory_states: per-layer memory states (for incremental inference)

        Returns:
            dict with 'logits', 'hidden_states', 'memory_states'
        """
        batch_size, seq_len = input_ids.shape
        device = input_ids.device

        # token embedding
        if hasattr(self.qwen.model, 'embed_tokens'):
            hidden_states = self.qwen.model.embed_tokens(input_ids)
        else:
            hidden_states = self.qwen.get_input_embeddings()(input_ids)

        # insert memory tokens (the MAC interleaving)
        hidden_states, seq_len_with_mem = self._insert_memory_tokens(
            hidden_states, batch_size, seq_len
        )

        # positions for RoPE over the expanded (memory + token) sequence
        if position_ids is None:
            position_ids = torch.arange(seq_len_with_mem, device=device).unsqueeze(0)

        # additive causal mask over the expanded sequence (simplified; see
        # _create_mac_attention_mask)
        mac_attention_mask = self._create_mac_attention_mask(
            seq_len_with_mem, device, hidden_states.dtype
        )

        if memory_states is None:
            memory_states = {}
        next_memory_states = {}

        # run the decoder layers manually so NeuralMemory can be injected
        # between attention and MLP on the selected layers
        for layer_idx, layer in enumerate(self.qwen.model.layers):
            layer_key = str(layer_idx)

            # attention block (pre-norm residual)
            residual = hidden_states
            hidden_states = layer.input_layernorm(hidden_states)

            try:
                attn_output = layer.self_attn(
                    hidden_states=hidden_states,
                    attention_mask=mac_attention_mask,
                    position_ids=position_ids,
                )
            except TypeError:
                # newer transformers versions compute RoPE at the model level
                # and expect `position_embeddings=(cos, sin)` instead
                position_embeddings = self.qwen.model.rotary_emb(hidden_states, position_ids)
                attn_output = layer.self_attn(
                    hidden_states=hidden_states,
                    attention_mask=mac_attention_mask,
                    position_embeddings=position_embeddings,
                )

            if isinstance(attn_output, tuple):
                attn_output = attn_output[0]

            hidden_states = residual + attn_output

            # NeuralMemory augmentation on the selected layers
            if layer_key in self.neural_memories:
                neural_mem = self.neural_memories[layer_key]
                gate_fn = self.memory_gates[layer_key]

                mem_state = memory_states.get(layer_key)

                # retrieve from (and update) the neural memory
                retrieved, next_mem_state = neural_mem(
                    hidden_states,
                    state=mem_state
                )

                gate = gate_fn(hidden_states)

                if self.gate_memory_output:
                    # multiplicative variant: modulate the hidden states
                    hidden_states = hidden_states * (1 + gate * retrieved.sigmoid())
                else:
                    # additive variant: gated residual injection
                    hidden_states = hidden_states + gate * retrieved

                next_memory_states[layer_key] = next_mem_state

            # MLP block (pre-norm residual)
            residual = hidden_states
            hidden_states = layer.post_attention_layernorm(hidden_states)
            hidden_states = layer.mlp(hidden_states)
            hidden_states = residual + hidden_states

        # final norm, strip the memory tokens, project to the vocabulary
        hidden_states = self.qwen.model.norm(hidden_states)
        hidden_states = self._remove_memory_tokens(hidden_states, seq_len)
        logits = self.qwen.lm_head(hidden_states)

        result = {
            'logits': logits,
            'hidden_states': hidden_states,
        }

        if return_memory_states:
            result['memory_states'] = next_memory_states

        return result

    def generate(
        self,
        input_ids: Tensor,
        max_new_tokens: int = 100,
        temperature: float = 1.0,
        top_p: float = 0.9,
        memory_states: Optional[Dict] = None,
        **kwargs
    ) -> Tensor:
        """
        Minimal sampling loop (`top_p` is accepted for API compatibility but
        not applied).

        Note: the full prefix is re-encoded on every step, so the
        caller-supplied `memory_states` is passed unchanged each iteration;
        threading the updated state forward would write the same tokens into
        memory repeatedly.
        """
        generated = input_ids.clone()

        for _ in range(max_new_tokens):
            outputs = self.forward(
                generated,
                memory_states=memory_states,
                return_memory_states=True
            )

            logits = outputs['logits'][:, -1, :]

            # sample (or greedily pick) the next token
            if temperature > 0:
                probs = F.softmax(logits / temperature, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1)
            else:
                next_token = logits.argmax(dim=-1, keepdim=True)

            generated = torch.cat([generated, next_token], dim=-1)

            # stop once every sequence in the batch has emitted EOS
            eos_id = getattr(self.config, 'eos_token_id', None)
            if eos_id is not None and (next_token == eos_id).all():
                break

        return generated
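

# Hedged sanity check (an addition, not part of the original example): verify
# that _remove_memory_tokens exactly inverts _insert_memory_tokens, including
# when seq_len is not a multiple of segment_len. Expects an already-built
# QwenMACTransformer, e.g. the one constructed in main().
def check_memory_token_roundtrip(mac_model: QwenMACTransformer, batch: int = 2, seq_len: int = 200):
    device = next(mac_model.parameters()).device
    dtype = mac_model.persist_mem_tokens.dtype
    x = torch.randn(batch, seq_len, mac_model.hidden_size, device=device, dtype=dtype)

    with_mem, _ = mac_model._insert_memory_tokens(x, batch, seq_len)
    recovered = mac_model._remove_memory_tokens(with_mem, seq_len)

    assert recovered.shape == x.shape
    assert torch.allclose(recovered, x)  # all padding and memory tokens stripped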


def main():
    """
    End-to-end usage example.
    """
    print("=" * 70)
    print("Example: integrating MAC (Memory-as-Context) into Qwen")
    print("=" * 70)

    try:
        from transformers import AutoModelForCausalLM, AutoTokenizer

        print("\n[1] Loading the Qwen model...")
        model_name = "Qwen/Qwen2-0.5B"

        tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            trust_remote_code=True
        )

        qwen_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device_map="auto" if torch.cuda.is_available() else None,
            trust_remote_code=True
        )

        print("  model config:")
        print(f"  - hidden_size: {qwen_model.config.hidden_size}")
        print(f"  - num_layers: {qwen_model.config.num_hidden_layers}")
        print(f"  - num_heads: {qwen_model.config.num_attention_heads}")

        print("\n[2] Building the QwenMAC model...")
        mac_model = QwenMACTransformer(
            qwen_model=qwen_model,
            segment_len=64,
            num_longterm_mem_tokens=8,
            num_persist_mem_tokens=4,
            neural_memory_layers=(1, 3, 5),
            memory_chunk_size=32,
            memory_batch_size=64,
        )

        if torch.cuda.is_available():
            # match the dtype of the fp16 Qwen weights so the prepended memory
            # tokens can be concatenated with the fp16 embeddings
            mac_model = mac_model.to(device='cuda', dtype=qwen_model.dtype)

        print("\n[3] Testing the forward pass...")
        test_text = "人工智能正在改变世界,它可以"  # "AI is changing the world; it can..."
        inputs = tokenizer(test_text, return_tensors="pt")

        device = next(mac_model.parameters()).device
        input_ids = inputs.input_ids.to(device)

        with torch.no_grad():
            outputs = mac_model(input_ids)

        print(f"  input shape: {input_ids.shape}")
        print(f"  output logits shape: {outputs['logits'].shape}")
        print(f"  number of memory states: {len(outputs['memory_states'])}")

        print("\n[4] Testing text generation...")
        with torch.no_grad():
            generated = mac_model.generate(
                input_ids,
                max_new_tokens=50,
                temperature=0.7
            )

        generated_text = tokenizer.decode(generated[0], skip_special_tokens=True)
        print(f"  generated text: {generated_text}")

    except ImportError as e:
        print("\nNote: transformers is required:")
        print("pip install transformers")
        print(f"error: {e}")

    print("\n" + "=" * 70)
    print("[standalone test] NeuralMemory component")
    print("=" * 70)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    mem = NeuralMemory(
        dim=512,
        chunk_size=32,
        batch_size=64,
        dim_head=64,
        heads=8,
        model=MemoryMLP(dim=64, depth=2),
        momentum=True,
        qk_rmsnorm=True,
    ).to(device)

    batch_size = 2
    seq_len = 256
    hidden_dim = 512

    x = torch.randn(batch_size, seq_len, hidden_dim).to(device)

    print(f"\ninput shape: {x.shape}")

    # first pass: retrieve while building up memory state
    retrieved, state = mem(x)
    print(f"retrieved shape: {retrieved.shape}")
    print(f"memory state seq_index: {state.seq_index}")

    # second pass: continue from the previous memory state
    x2 = torch.randn(batch_size, seq_len, hidden_dim).to(device)
    retrieved2, state2 = mem(x2, state=state)
    print(f"second retrieved shape: {retrieved2.shape}")
    print(f"updated seq_index: {state2.seq_index}")

    print("\n" + "=" * 70)
    print("Done!")
    print("=" * 70)


if __name__ == "__main__":
    main()