| """ |
| Titans Neural Memory 与 Qwen 模型集成示例 |
| |
| 本文件展示了如何将 Titans 的 NeuralMemory 模块集成到 Qwen 模型中, |
| 以增强其长期记忆能力。 |
| |
| 主要集成方案: |
| 1. 作为独立的记忆增强模块(Memory Augmented) |
| 2. 替换/增强特定层的注意力机制 |
| 3. Memory-as-Context 方式(类似 MAC Transformer) |
| """ |

import torch
import torch.nn as nn
from torch import Tensor
from typing import Optional, Tuple
from einops import rearrange, repeat
from copy import deepcopy

from titans_pytorch import NeuralMemory, MemoryMLP, NeuralMemState


class TitansMemoryWrapper(nn.Module):
    """
    Simplest integration: add a Titans memory module outside the Qwen model.

    How it works:
    1. NeuralMemory stores and retrieves long-range information
    2. The retrieved memory is fused back into the Qwen hidden states

    When to use:
    - You do not want to modify Qwen's internals
    - You want to quickly evaluate the effect of Titans memory
    """

    def __init__(
        self,
        qwen_model,
        hidden_size: int = 896,
        chunk_size: int = 64,
        memory_batch_size: int = 128,
        num_heads: int = 4,
        dim_head: int = 64,
        memory_depth: int = 2,
    ):
        super().__init__()
        self.qwen = qwen_model

        # Projections into and out of the memory dimension
        self.mem_dim = dim_head * num_heads
        self.to_mem_input = nn.Linear(hidden_size, self.mem_dim)
        self.from_mem_output = nn.Linear(self.mem_dim, hidden_size)

        # Per-head MLP that acts as the memory model
        memory_model = MemoryMLP(
            dim=dim_head,
            depth=memory_depth,
            expansion_factor=2.0
        )

        self.neural_memory = NeuralMemory(
            dim=self.mem_dim,
            chunk_size=chunk_size,
            batch_size=memory_batch_size,
            dim_head=dim_head,
            heads=num_heads,
            model=memory_model,
            momentum=True,
            momentum_order=1,
            qk_rmsnorm=True,
        )

        # Gate deciding how much retrieved memory to blend back in
        self.fusion_gate = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.Sigmoid()
        )
    def forward(
        self,
        input_ids: Tensor,
        attention_mask: Optional[Tensor] = None,
        memory_state: Optional[NeuralMemState] = None,
        **kwargs
    ):
        # Run the underlying Qwen model and take its final hidden states
        qwen_outputs = self.qwen(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True,
            **kwargs
        )

        hidden_states = qwen_outputs.hidden_states[-1]

        # Project into the memory dimension
        mem_input = self.to_mem_input(hidden_states)

        # Retrieve from (and update) the neural memory
        retrieved, next_memory_state = self.neural_memory(
            mem_input,
            state=memory_state
        )

        # Project the retrieved memory back to the hidden size
        retrieved_hidden = self.from_mem_output(retrieved)

        # Gated residual fusion of memory into the hidden states
        gate = self.fusion_gate(torch.cat([hidden_states, retrieved_hidden], dim=-1))
        enhanced_hidden = hidden_states + gate * retrieved_hidden

        # Recompute logits from the memory-enhanced hidden states when possible,
        # otherwise fall back to the original Qwen logits
        if hasattr(self.qwen, 'lm_head'):
            logits = self.qwen.lm_head(enhanced_hidden)
        else:
            logits = qwen_outputs.logits

        return {
            'logits': logits,
            'hidden_states': enhanced_hidden,
            'memory_state': next_memory_state,
            'qwen_outputs': qwen_outputs
        }
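

# A minimal usage sketch for TitansMemoryWrapper (a hypothetical helper, not part
# of the original example): process a long document segment by segment and carry
# the NeuralMemState across calls so the neural memory accumulates information
# beyond a single forward pass.  `wrapped_model` and `tokenizer` are assumed to be
# built as in example_usage() below.
def run_wrapper_over_segments(wrapped_model, tokenizer, segments, device='cpu'):
    """Feed text segments sequentially, reusing the returned memory state."""
    memory_state = None
    last_outputs = None
    for text in segments:
        inputs = tokenizer(text, return_tensors='pt').to(device)
        with torch.no_grad():
            last_outputs = wrapped_model(
                input_ids=inputs.input_ids,
                memory_state=memory_state,   # resume from the previous segment
            )
        memory_state = last_outputs['memory_state']  # carry the state forward
    return last_outputs, memory_state

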
class QwenDecoderLayerWithMemory(nn.Module):
    """
    A Qwen decoder layer modified to include a Titans memory module.

    Memory retrieval and update are inserted after the attention block.
    """

    def __init__(
        self,
        original_layer,
        hidden_size: int,
        chunk_size: int = 64,
        memory_batch_size: int = 128,
        num_heads: int = 4,
        dim_head: int = 64,
    ):
        super().__init__()

        # Reuse the submodules of the original Qwen decoder layer
        self.self_attn = original_layer.self_attn
        self.mlp = original_layer.mlp
        self.input_layernorm = original_layer.input_layernorm
        self.post_attention_layernorm = original_layer.post_attention_layernorm

        # Projections into and out of the memory dimension
        self.mem_dim = dim_head * num_heads
        self.to_mem = nn.Linear(hidden_size, self.mem_dim)
        self.from_mem = nn.Linear(self.mem_dim, hidden_size)

        memory_model = MemoryMLP(dim=dim_head, depth=2)

        self.neural_memory = NeuralMemory(
            dim=self.mem_dim,
            chunk_size=chunk_size,
            batch_size=memory_batch_size,
            dim_head=dim_head,
            heads=num_heads,
            model=memory_model,
            momentum=True,
        )

        # Gate controlling how much memory output is added back
        self.mem_gate = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.Sigmoid()
        )
    def forward(
        self,
        hidden_states: Tensor,
        attention_mask: Optional[Tensor] = None,
        position_ids: Optional[Tensor] = None,
        memory_state: Optional[NeuralMemState] = None,
        **kwargs
    ):
        # Standard Qwen self-attention block (pre-norm + residual).
        # Note: the number of values returned by self_attn (and the arguments it
        # accepts) depends on the installed transformers version, so only the
        # first returned element is used here.
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        attn_outputs = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            **kwargs
        )
        attn_output = attn_outputs[0] if isinstance(attn_outputs, tuple) else attn_outputs
        hidden_states = residual + attn_output

        # Titans memory: retrieve with the post-attention hidden states
        mem_input = self.to_mem(hidden_states)
        retrieved, next_memory_state = self.neural_memory(
            mem_input,
            state=memory_state
        )
        mem_output = self.from_mem(retrieved)

        # Gated residual injection of the retrieved memory
        gate = self.mem_gate(hidden_states)
        hidden_states = hidden_states + gate * mem_output

        # Standard Qwen MLP block (pre-norm + residual)
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states, next_memory_state
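

# A small sketch (an illustration, not from the original file) of how selected
# decoder layers of a loaded Qwen model could be swapped for the memory-augmented
# layer above.  It relies on the `qwen_model.model.layers` attribute path that the
# MAC integration below also uses.  Note that the stock Qwen forward loop will not
# pass or collect `memory_state`, so each forward call starts with a fresh memory
# unless you drive the layers manually (as QwenWithMAC below does).
def patch_qwen_layers_with_memory(qwen_model, layer_indices=(4, 8), **memory_kwargs):
    """Replace the decoder layers at `layer_indices` with QwenDecoderLayerWithMemory."""
    hidden_size = qwen_model.config.hidden_size
    for idx in layer_indices:
        original_layer = qwen_model.model.layers[idx]
        qwen_model.model.layers[idx] = QwenDecoderLayerWithMemory(
            original_layer=original_layer,
            hidden_size=hidden_size,
            **memory_kwargs,
        )
    return qwen_model

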
class QwenWithMAC(nn.Module):
    """
    Memory-as-Context (MAC) integration of Titans into Qwen.

    Core idea:
    1. Split the long sequence into segments
    2. Prepend long-term memory tokens to each segment
    3. Use NeuralMemory to update these memory tokens

    This scheme is the closest to the MAC configuration in the Titans paper.
    """

    def __init__(
        self,
        qwen_model,
        hidden_size: int = 896,
        segment_len: int = 128,
        num_longterm_mem_tokens: int = 16,
        num_persist_mem_tokens: int = 4,
        memory_layers: Tuple[int, ...] = (2, 4, 6),
        chunk_size: int = 64,
        memory_batch_size: int = 128,
    ):
        super().__init__()

        self.qwen = qwen_model
        self.hidden_size = hidden_size
        self.segment_len = segment_len
        self.num_longterm_mem_tokens = num_longterm_mem_tokens

        # Persistent (data-independent) memory tokens, prepended once per sequence
        self.persist_mem = nn.Parameter(
            torch.randn(num_persist_mem_tokens, hidden_size) * 0.02
        )

        # Long-term memory tokens, prepended to every segment
        self.longterm_mem = nn.Parameter(
            torch.randn(num_longterm_mem_tokens, hidden_size) * 0.02
        )

        # One NeuralMemory per selected decoder layer
        self.memory_layers = memory_layers
        self.neural_memories = nn.ModuleDict()

        memory_model = MemoryMLP(dim=64, depth=2)

        for layer_idx in memory_layers:
            self.neural_memories[str(layer_idx)] = NeuralMemory(
                dim=hidden_size,
                chunk_size=chunk_size,
                batch_size=memory_batch_size,
                dim_head=64,
                heads=hidden_size // 64,
                model=deepcopy(memory_model),
                momentum=True,
                qk_rmsnorm=True,
            )
    def prepare_inputs_with_memory(
        self,
        hidden_states: Tensor,
        batch_size: int,
    ) -> Tensor:
        """
        Insert memory tokens at the start of every segment.

        For example, with segment_len=128 and 16 long-term memory tokens, an
        input of shape (b, 256, d) becomes (b, 4 + 2 * (16 + 128), d) once the
        4 persistent tokens are also prepended.
        """
        seq_len = hidden_states.shape[1]
        num_segments = (seq_len + self.segment_len - 1) // self.segment_len

        # Expand the long-term memory tokens per batch element and per segment
        longterm = repeat(
            self.longterm_mem,
            'n d -> b s n d',
            b=batch_size,
            s=num_segments
        )

        # Right-pad the sequence so it divides evenly into segments
        padded_len = num_segments * self.segment_len
        if seq_len < padded_len:
            hidden_states = nn.functional.pad(
                hidden_states,
                (0, 0, 0, padded_len - seq_len)
            )

        hidden_states = rearrange(
            hidden_states,
            'b (s n) d -> b s n d',
            n=self.segment_len
        )

        # Prepend the long-term memory tokens to every segment
        hidden_states = torch.cat([longterm, hidden_states], dim=2)

        # Flatten the segments back into a single sequence
        hidden_states = rearrange(hidden_states, 'b s n d -> b (s n) d')

        # Prepend the persistent memory tokens once, at the very front
        persist = repeat(self.persist_mem, 'n d -> b n d', b=batch_size)
        hidden_states = torch.cat([persist, hidden_states], dim=1)

        return hidden_states

    def forward(
        self,
        input_ids: Tensor,
        attention_mask: Optional[Tensor] = None,
        memory_states: Optional[dict] = None,
        **kwargs
    ):
        batch_size = input_ids.shape[0]

        # Token embeddings from Qwen
        if hasattr(self.qwen.model, 'embed_tokens'):
            hidden_states = self.qwen.model.embed_tokens(input_ids)
        else:
            hidden_states = self.qwen.get_input_embeddings()(input_ids)

        # Insert persistent and long-term memory tokens
        hidden_states = self.prepare_inputs_with_memory(hidden_states, batch_size)

        if memory_states is None:
            memory_states = {}

        next_memory_states = {}

        # Drive the Qwen decoder layers manually so the neural memories can be
        # queried between layers.  Note: depending on the transformers version,
        # Qwen decoder layers may also require position ids / rotary position
        # embeddings to be passed explicitly; adapt this loop to your version.
        for layer_idx, layer in enumerate(self.qwen.model.layers):

            layer_outputs = layer(
                hidden_states,
                attention_mask=None,
                **kwargs
            )
            hidden_states = layer_outputs[0]

            # Query / update the neural memory attached to this layer
            if str(layer_idx) in self.neural_memories:
                neural_mem = self.neural_memories[str(layer_idx)]
                mem_state = memory_states.get(str(layer_idx))

                retrieved, next_state = neural_mem(
                    hidden_states,
                    state=mem_state
                )

                # Scaled residual injection of the retrieved memory
                hidden_states = hidden_states + retrieved * 0.1
                next_memory_states[str(layer_idx)] = next_state

        # Final norm and language-model head.  Note that the logits still include
        # the inserted memory-token positions, so the sequence dimension is longer
        # than the original input_ids.
        hidden_states = self.qwen.model.norm(hidden_states)

        logits = self.qwen.lm_head(hidden_states)

        return {
            'logits': logits,
            'hidden_states': hidden_states,
            'memory_states': next_memory_states
        }
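

# A brief usage sketch for QwenWithMAC (an illustration, not part of the original
# example).  It assumes a Qwen2-style causal LM has already been loaded as in
# example_usage() below, and shows how the per-layer memory states returned by
# forward() can be fed back in when processing the next chunk of a long input.
# See the version note inside QwenWithMAC.forward about decoder-layer arguments.
def run_mac_example(qwen_model, input_ids, next_input_ids):
    mac_model = QwenWithMAC(
        qwen_model=qwen_model,
        hidden_size=qwen_model.config.hidden_size,
        segment_len=128,
        memory_layers=(2, 4, 6),
    )
    with torch.no_grad():
        out_1 = mac_model(input_ids)
        # Resume from the returned memory states for the following chunk
        out_2 = mac_model(next_input_ids, memory_states=out_1['memory_states'])
    return out_2

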
def example_usage():
    """Demonstrate how to use the integration schemes above."""

    print("=" * 60)
    print("Titans Neural Memory + Qwen integration example")
    print("=" * 60)

    try:
        from transformers import AutoModelForCausalLM, AutoTokenizer

        model_name = "Qwen/Qwen2-0.5B"
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

        print(f"\nLoading model: {model_name}")
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        # Load in the default float32 so the newly added (float32) memory layers
        # match the Qwen weights; cast everything together if you prefer float16.
        qwen_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            trust_remote_code=True
        ).to(device)

        hidden_size = qwen_model.config.hidden_size
        print(f"Model hidden size: {hidden_size}")

        print("\n--- Scheme 1: TitansMemoryWrapper ---")
        wrapped_model = TitansMemoryWrapper(
            qwen_model=qwen_model,
            hidden_size=hidden_size,
            chunk_size=64,
            memory_batch_size=128,
        ).to(device)

        text = "人工智能的发展历程"  # Chinese test prompt: "The history of AI development"
        inputs = tokenizer(text, return_tensors="pt")

        with torch.no_grad():
            outputs = wrapped_model(
                input_ids=inputs.input_ids.to(device),
            )
        print(f"Output logits shape: {outputs['logits'].shape}")
        print(f"Memory state: {type(outputs['memory_state'])}")

    except ImportError as e:
        print("\nNote: the required dependencies must be installed:")
        print("pip install transformers torch titans-pytorch")
        print(f"Error: {e}")

    # Standalone test of NeuralMemory, independent of the Qwen model
    print("\n--- Standalone NeuralMemory test ---")

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    mem = NeuralMemory(
        dim=384,
        chunk_size=64,
        batch_size=128,
        dim_head=64,
        heads=4,
        model=MemoryMLP(dim=64, depth=2),
        momentum=True,
    ).to(device)

    seq = torch.randn(2, 256, 384).to(device)

    retrieved, mem_state = mem(seq)
    print(f"Input shape: {seq.shape}")
    print(f"Retrieved output shape: {retrieved.shape}")
    print(f"Memory state sequence index: {mem_state.seq_index}")

    print("\n" + "=" * 60)
    print("Integration demo finished!")
    print("=" * 60)


if __name__ == "__main__":
    example_usage()