| """PyTorch OpenAI GPT-2 model modified with MultiQuery attention""" |
|
|
| from typing import Optional, Tuple, Union |
|
|
| import math |
| import torch |
| import torch.utils.checkpoint |
| from torch import nn |
|
|
| from transformers.activations import ACT2FN |
| from transformers.modeling_outputs import ( |
| BaseModelOutputWithPastAndCrossAttentions, |
| ) |
| from transformers.models.gpt2.modeling_gpt2 import GPT2Model, GPT2Block, GPT2PreTrainedModel, GPT2LMHeadModel |
| from transformers.utils import logging |
| from .configuration_gpt2_mq import GPT2CustomConfig, MULTI_QUERY |
|
|
| logger = logging.get_logger(__name__) |
|
|
|
|
def make_causal_mask(
    input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int
) -> torch.BoolTensor:
    """
    Make the boolean causal mask used for self-attention. `True` marks positions that must not be attended to.
    """
    batch_size, target_length = input_ids_shape
    mask = torch.empty((target_length, target_length + past_key_values_length), dtype=torch.bool, device=device)
    seq_ids = torch.arange(target_length, device=device)
    mask[:, past_key_values_length:] = seq_ids[:, None] < seq_ids[None, :]

    if past_key_values_length > 0:
        mask[:, :past_key_values_length] = False

    expanded_mask = mask[None, :, :].expand(batch_size, target_length, target_length + past_key_values_length)
    return expanded_mask


def expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor:
    """
    Expands the attention_mask from `[batch_size, src_length]` to `[batch_size, tgt_length, src_length]`,
    inverting it so that `True` marks masked (non-attended) positions.
    """
    batch_size, src_length = mask.shape
    tgt_length = tgt_length if tgt_length is not None else src_length

    expanded_mask = ~(mask[:, None, :].to(torch.bool))
    return expanded_mask.expand(batch_size, tgt_length, src_length)


def prepare_attn_mask(
    attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int
) -> torch.BoolTensor:
    # Combine the causal mask with the padding mask supplied by the caller.
    combined_attention_mask = None
    device = attention_mask.device
    _, src_length = input_shape

    if src_length > 1:
        combined_attention_mask = make_causal_mask(
            input_shape, device=device, past_key_values_length=past_key_values_length
        )

    # [batch_size, src_length] -> [batch_size, tgt_length, src_length]
    expanded_attn_mask = expand_mask(attention_mask, tgt_length=src_length)
    combined_attention_mask = (
        expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask
    )

    return combined_attention_mask
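
# Illustration (comment only): for one sequence of length 3 with no cache and no padding,
# `prepare_attn_mask` returns a (1, 3, 3) boolean tensor in which `True` marks positions
# that must not be attended to:
#   [[False, True,  True ],
#    [False, False, True ],
#    [False, False, False]]

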
class LinearGPT2MLP(nn.Module):
    def __init__(self, intermediate_size, config):
        super().__init__()
        embed_dim = config.hidden_size
        self.c_fc = nn.Linear(embed_dim, intermediate_size)
        self.c_proj = nn.Linear(intermediate_size, embed_dim)
        if "gelu" in config.activation_function:
            # Every GELU variant is mapped to the tanh approximation.
            self.act = lambda x: torch.nn.functional.gelu(x, approximate="tanh")
        else:
            self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
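
# Note: the stock GPT2MLP uses Conv1D projections, whose weight matrices are stored transposed
# relative to nn.Linear, so weights for this Linear-based MLP are not directly interchangeable
# with upstream GPT-2 checkpoints.

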
class GPT2MQAttention(nn.Module):
    """Multi-query self-attention: all query heads share a single key head and a single value head."""

    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__()
        assert config.attention_head_type == MULTI_QUERY

        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.split_size = self.embed_dim
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )

        self.scale_attn_weights = config.scale_attn_weights
        if is_cross_attention:
            raise NotImplementedError("Cross-attention not implemented for MQA")
        self.is_cross_attention = is_cross_attention

        # Layer-wise attention scaling, reordering, and upcasting
        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
        self.layer_idx = layer_idx
        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn

        if self.is_cross_attention:
            raise NotImplementedError("Cross-attention not implemented for MQA")
        else:
            # Fused projection: `embed_dim` output features for the queries plus one
            # `head_dim`-sized key and one `head_dim`-sized value shared by all query heads.
            self.attn = nn.Linear(self.embed_dim, self.embed_dim + 2 * self.head_dim)
        self.c_proj = nn.Linear(self.embed_dim, self.embed_dim)

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

        self.pruned_heads = set()
        self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
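        # Compared with standard multi-head attention, the fused QKV projection has
        # embed_dim + 2 * head_dim output features instead of 3 * embed_dim, and the
        # per-token KV cache is smaller by a factor of num_heads.
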
    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        # Shapes (see `forward`):
        #   query: (batch_size, query_length * num_heads, head_dim)
        #   key:   (batch_size, head_dim, key_length)
        #   value: (batch_size, key_length, head_dim)
        batch_size = query.size(0)
        query_length = query.size(1) // self.num_heads
        key_length = key.size(2)

        if self.scale_attn_weights:
            query = query * self.inv_norm_factor

        # (batch_size, query_length * num_heads, head_dim) x (batch_size, head_dim, key_length)
        # -> (batch_size, query_length * num_heads, key_length)
        attn_weights = torch.bmm(query, key)

        # Unpack the heads so that the per-head attention mask can be applied.
        attn_weights = attn_weights.view(batch_size, query_length, self.num_heads, key_length)

        if self.scale_attn_by_inverse_layer_idx:
            attn_weights = attn_weights / float(self.layer_idx + 1)

        if attention_mask is not None:
            attn_weights = attn_weights.masked_fill_(attention_mask, torch.finfo(attn_weights.dtype).min)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Downcast (if necessary) back to V's dtype (if in mixed precision) -- no-op otherwise.
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            raise NotImplementedError

        # Fold the heads back into the batched matmul layout before multiplying by the values.
        _attn_weights = attn_weights.view(batch_size, query_length * self.num_heads, key_length)
        # (batch_size, query_length * num_heads, key_length) x (batch_size, key_length, head_dim)
        # -> (batch_size, query_length * num_heads, head_dim)
        attn_output = torch.bmm(_attn_weights, value)
        attn_output = attn_output.view(batch_size, query_length, self.num_heads, self.head_dim)

        return attn_output, attn_weights

    def _merge_heads(self, tensor):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        batch_size, seq_length, num_heads, head_dim = tensor.shape
        return tensor.view(batch_size, seq_length, num_heads * head_dim)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        if encoder_hidden_states is not None:
            raise NotImplementedError("Cross-attention not implemented for MQA")
        else:
            qkv = self.attn(hidden_states)
            # The queries keep `num_heads` heads; the key and the value are one shared head each.
            query, key, value = qkv.split([self.embed_dim, self.head_dim, self.head_dim], dim=2)

        batch_size, seq_length = query.shape[:2]

        # (batch_size, seq_length, num_heads * head_dim)
        # -> (batch_size, seq_length * num_heads, head_dim)
        query = query.reshape(batch_size, seq_length * self.num_heads, self.head_dim)

        # (batch_size, seq_length, head_dim) -> (batch_size, head_dim, seq_length)
        key = key.transpose(1, 2)

        if layer_past is not None:
            past_key, past_value = layer_past
            # Append the new key/value states to the cache along the sequence dimension.
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        if self.reorder_and_upcast_attn:
            raise NotImplementedError("Reorder and upcast attention not implemented for MQA")
        else:
            attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # attn_output, present, (attn_weights)
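
# Note on the cache layout: `present` stores the key already transposed as
# (batch_size, head_dim, key_length) and the value as (batch_size, key_length, head_dim),
# each with a single shared head, so caches produced by the stock GPT2Attention are not
# interchangeable with this implementation.

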
class GPT2CustomBlock(GPT2Block):
    def __init__(self, config: GPT2CustomConfig, layer_idx=None):
        super().__init__(config, layer_idx)
        # Swap in the multi-query attention and the Linear-based MLP defined above.
        if config.attention_head_type == MULTI_QUERY:
            self.attn = GPT2MQAttention(config, layer_idx=layer_idx)
            if config.add_cross_attention:
                raise NotImplementedError("Cross-attention not implemented for MQA")

        hidden_size = config.hidden_size
        inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
        self.mlp = LinearGPT2MLP(inner_dim, config)
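    # The layer norms, residual connections, and the forward pass are inherited unchanged from GPT2Block.

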
class GPT2CustomModel(GPT2Model):
    config_class = GPT2CustomConfig

    def __init__(self, config):
        GPT2PreTrainedModel.__init__(self, config)

        if config.attention_head_type != MULTI_QUERY:
            raise NotImplementedError("optimized gpt2 is not implemented for MHA")

        self.embed_dim = config.hidden_size

        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)

        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([GPT2CustomBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
            seq_length = input_ids.shape[1]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
            # `input_ids` is None on this branch, so the sequence length must come from `inputs_embeds`.
            seq_length = inputs_embeds.shape[1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_key_values = tuple([None] * len(self.h))

        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values[0] is not None:
            # The cached key has shape (batch_size, head_dim, past_length).
            past_key_values_length = past_key_values[0][0].shape[-1]
            seq_length_with_past = seq_length_with_past + past_key_values_length
        if position_ids is None:
            position_ids = torch.arange(
                past_key_values_length, input_shape[-1] + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        # Build the combined (padding + causal) self-attention mask. `device` is valid for both
        # the input_ids and the inputs_embeds paths.
        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length_with_past), device=device)
        else:
            attention_mask = attention_mask.to(device)

        attention_mask = prepare_attn_mask(
            attention_mask,
            input_shape=(batch_size, seq_length),
            past_key_values_length=past_key_values_length,
        )

        # (batch_size, tgt_length, src_length) -> (batch_size, tgt_length, num_heads, src_length),
        # matching the layout expected by GPT2MQAttention._attn.
        attention_mask = attention_mask.unsqueeze(2).expand(
            batch_size, attention_mask.shape[1], self.config.num_attention_heads, attention_mask.shape[2]
        )
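        # `expand` only creates a broadcast view here, so the extra per-head dimension of the mask
        # does not allocate additional memory.
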
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length].
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            raise NotImplementedError
        else:
            encoder_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):

            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure layer_past is on same device as hidden_states (might not be correct)
                if layer_past is not None:
                    layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:

                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache, output_attentions)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)

            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class GPT2LMHeadCustomModel(GPT2LMHeadModel):
    config_class = GPT2CustomConfig

    def __init__(self, config):
        GPT2PreTrainedModel.__init__(self, config)
        self.transformer = GPT2CustomModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()
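
# Minimal usage sketch (assumption: `GPT2CustomConfig` accepts the standard GPT-2 size arguments
# plus `attention_head_type`; adjust to the actual configuration class):
#
#     config = GPT2CustomConfig(attention_head_type=MULTI_QUERY)
#     model = GPT2LMHeadCustomModel(config)
#     input_ids = torch.randint(0, config.vocab_size, (1, 8))
#     logits = model(input_ids=input_ids).logits  # (1, 8, vocab_size)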