| """ |
| |
| This code is in part adapted from AllenAI's Longformer: |
| https://github.com/allenai/longformer/ |
| and in part adapted from: |
| https://github.com/huggingface/transformers |
| |
| Author: Annette Rios (rios@cl.uzh.ch) |
| |
| """ |
| from typing import List, Optional, Tuple, Dict, Union |
| from torch import nn, Tensor, zeros |
| import torch |
| import math |
| import random |
| from transformers.models.mbart.modeling_mbart import MBartConfig, MBartForConditionalGeneration, MBartEncoder, MBartLearnedPositionalEmbedding, MBartEncoderLayer, MBartDecoder, MBartModel, shift_tokens_right, _expand_mask |
| from transformers.modeling_outputs import BaseModelOutput, Seq2SeqModelOutput |
| from transformers.configuration_utils import PretrainedConfig |
| from transformers import GPT2Model, GPT2Config, AutoModelForCausalLM, AutoConfig |
| from transformers.activations import ACT2FN |
|
|
| import torch.nn.functional as F |
| from transformers.models.roberta.modeling_roberta import RobertaConfig, RobertaModel, RobertaForMaskedLM |
|
|
| from functools import lru_cache |
| import os.path |
|
|
|
|
| class MLongformerEncoderDecoderForConditionalGenerationCustom(MBartForConditionalGeneration): |
| def __init__(self, config): |
| super(MBartForConditionalGeneration, self).__init__(config) |
| self.decoder_config = GPT2Config.from_dict(config.decoder_config) |
| self.decoder_config.add_cross_attention=True |
| self.config.eos_token_id = self.decoder_config.eos_token_id |
| |
|
|
| self.model = LongMBartModelCustom(config) |
| |
|
|
| if self.config.from_mbart: |
| self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) |
| self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) |
| else: |
| self.lm_head = nn.Linear(self.decoder_config.n_embd, self.decoder_config.vocab_size, bias=False) |
| self.register_buffer("final_logits_bias", torch.zeros((1, self.decoder_config.vocab_size))) |
|
|
| self.model.decoder = GPT2Model(self.decoder_config) |
| if config.attention_mode == 'n2': |
| pass |
| else: |
| for i, layer in enumerate(self.model.encoder.layers): |
| layer.self_attn = LongformerSelfAttentionForMBart(config, layer_id=i) |
| |
| self.post_init() |
|
|
| def post_init(self): |
| super().post_init() |
| if not self.config.from_mbart: |
| self.lm_head = nn.Linear(self.decoder_config.n_embd, self.decoder_config.vocab_size, bias=False) |
|
|
| def _set_gradient_checkpointing(self, module, value=False): |
| if isinstance(module, MBartDecoder): |
| module.gradient_checkpointing = value |
| self.model.decoder._set_gradient_checkpointing(module, value=value) |
|
|
| @classmethod |
| def from_encoder_decoder_pretrained( |
| cls, |
| mbart_pretrained_model_name_or_path: str = None, |
| decoder_pretrained_model_name_or_path: str = None, |
| *model_args, |
| **kwargs |
| ) -> MBartForConditionalGeneration: |
| config = MLongformerEncoderDecoderConfigCustom.from_pretrained(mbart_pretrained_model_name_or_path) |
| config.from_mbart = True |
| config.tie_word_embeddings = False |
| config.decoder_config = GPT2Config.from_pretrained(decoder_pretrained_model_name_or_path).to_dict() |
|
|
| mbart = super().from_pretrained(mbart_pretrained_model_name_or_path, config=config) |
| decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, add_cross_attention=True) |
|
|
| mbart.model.decoder = decoder.transformer |
| mbart.lm_head = decoder.lm_head |
| mbart.register_buffer("final_logits_bias", torch.zeros((1, decoder.config.vocab_size))) |
|
|
| |
| mbart.model.enc_to_dec_proj.apply(mbart.model._init_weights) |
| for layer in mbart.model.decoder.h: |
| layer.crossattention.c_attn.apply(mbart.model.decoder._init_weights) |
|
|
| del mbart.model.shared |
| return mbart |
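|
|
| # A hedged usage sketch (not executed on import): wiring a pretrained mBART encoder to a pretrained |
| # GPT-2 decoder via `from_encoder_decoder_pretrained`. The checkpoint names below are placeholders; |
| # the mBART path is assumed to point to a checkpoint already converted to the long-attention format |
| # (i.e. its config carries `attention_window`/`attention_dilation`), and `_example_build_model` is an |
| # illustrative helper, not part of the original code. |
| def _example_build_model(mbart_path: str = "path/to/long-mbart", gpt2_path: str = "gpt2"): |
|     model = MLongformerEncoderDecoderForConditionalGenerationCustom.from_encoder_decoder_pretrained( |
|         mbart_pretrained_model_name_or_path=mbart_path, |
|         decoder_pretrained_model_name_or_path=gpt2_path, |
|     ) |
|     return model |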
|
|
|
|
| class MLongformerEncoderDecoderConfigCustom(MBartConfig): |
| def __init__(self, attention_window: List[int] = None, attention_dilation: List[int] = None, |
| autoregressive: bool = False, attention_mode: str = 'sliding_chunks', |
| gradient_checkpointing: bool = False, **kwargs): |
| """ |
| Args: |
| attention_window: list of attention window sizes of length = number of layers. |
| window size = number of attention locations on each side. |
| For an affective window size of 512, use `attention_window=[256]*num_layers` |
| which is 256 on each side. |
| attention_dilation: list of attention dilation of length = number of layers. |
| attention dilation of `1` means no dilation. |
| autoregressive: do autoregressive attention or have attention of both sides |
| attention_mode: 'n2' for regular n^2 self-attention, 'tvm' for TVM implemenation of Longformer |
| selfattention, 'sliding_chunks' for another implementation of Longformer selfattention |
| """ |
| super().__init__(**kwargs) |
| self.from_mbart = False |
| self.attention_window = attention_window |
| self.attention_dilation = attention_dilation |
| self.autoregressive = autoregressive |
| self.attention_mode = attention_mode |
| self.gradient_checkpointing = gradient_checkpointing |
| assert self.attention_mode in ['sliding_chunks', 'n2'] |
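|
|
| # A small sketch (illustrative values only) of constructing this config by hand, following the |
| # docstring above: a one-sided window of 256 per encoder layer gives an effective window of 512, |
| # and a dilation of 1 means no dilation. `_example_long_config` is a hypothetical helper. |
| def _example_long_config(num_layers: int = 12) -> MLongformerEncoderDecoderConfigCustom: |
|     return MLongformerEncoderDecoderConfigCustom( |
|         attention_window=[256] * num_layers, |
|         attention_dilation=[1] * num_layers, |
|         autoregressive=False, |
|         attention_mode="sliding_chunks", |
|     ) |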
|
|
|
|
| class LongMBartModelCustom(MBartModel): |
| def __init__(self, config: MBartConfig): |
| super().__init__(config) |
| del self.shared |
| decoder_config = GPT2Config.from_dict(config.decoder_config) |
|
|
| padding_idx, vocab_size = config.pad_token_id, config.vocab_size |
| if self.config.from_mbart: |
| self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) |
|
|
| self.encoder = LongMBartEncoder(config) |
| self.enc_to_dec_proj = torch.nn.Linear(config.d_model, decoder_config.n_embd) |
| self.act = ACT2FN[decoder_config.activation_function] |
| self.decoder = GPT2Model(decoder_config) |
|
|
| |
| self.post_init() |
|
|
| def get_input_embeddings(self): |
| return self.encoder.embed_tokens |
|
|
| def set_input_embeddings(self, value): |
| self.encoder.embed_tokens = value |
|
|
| def forward( |
| self, |
| input_ids: torch.LongTensor = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| decoder_input_ids: Optional[torch.LongTensor] = None, |
| decoder_attention_mask: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| decoder_head_mask: Optional[torch.Tensor] = None, |
| cross_attn_head_mask: Optional[torch.Tensor] = None, |
| encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| decoder_inputs_embeds: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ): |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| |
| |
| if decoder_input_ids is None and decoder_inputs_embeds is None: |
| decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id) |
|
|
| |
| |
| |
| |
|
|
| if encoder_outputs is None: |
| encoder_outputs = self.encoder( |
| input_ids=input_ids, |
| attention_mask=attention_mask, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| |
| elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): |
| encoder_outputs = BaseModelOutput( |
| last_hidden_state=encoder_outputs[0], |
| hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, |
| attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, |
| ) |
|
|
| encoder_hidden_states = encoder_outputs[0] |
|
|
| |
| # drop source positions that are padding in every batch element |
| non_empty_mask = attention_mask.abs().sum(dim=0).bool() |
| encoder_hidden_states = encoder_hidden_states[:, non_empty_mask] |
| encoder_attention_mask = attention_mask[:, non_empty_mask] |
|
|
| # clamp global-attention markers (2) back to 1 so the decoder cross-attention sees a binary mask |
| encoder_attention_mask = torch.clamp(encoder_attention_mask, min=0, max=1) |
|
|
| # project encoder states to the decoder's hidden size, with dropout active only during training |
| encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) |
| encoder_hidden_states = self.act(encoder_hidden_states) |
| encoder_hidden_states = nn.functional.dropout(encoder_hidden_states, p=0.1, training=self.training) |
|
|
| |
| decoder_outputs = self.decoder( |
| input_ids=decoder_input_ids, |
| attention_mask=decoder_attention_mask, |
| encoder_hidden_states=encoder_hidden_states, |
| encoder_attention_mask=encoder_attention_mask, |
| head_mask=decoder_head_mask, |
| |
| past_key_values=past_key_values, |
| inputs_embeds=decoder_inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| if not return_dict: |
| return decoder_outputs + encoder_outputs |
|
|
| return Seq2SeqModelOutput( |
| last_hidden_state=decoder_outputs.last_hidden_state, |
| past_key_values=decoder_outputs.past_key_values, |
| decoder_hidden_states=decoder_outputs.hidden_states, |
| decoder_attentions=decoder_outputs.attentions, |
| cross_attentions=decoder_outputs.cross_attentions, |
| encoder_last_hidden_state=encoder_outputs.last_hidden_state, |
| encoder_hidden_states=encoder_outputs.hidden_states, |
| encoder_attentions=encoder_outputs.attentions, |
| ) |
| |
| class MLongformerEncoderDecoderForConditionalGeneration(MBartForConditionalGeneration): |
| def __init__(self, config): |
| super(MBartForConditionalGeneration, self).__init__(config) |
|
|
| self.model = LongMBartModel(config) |
| self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) |
| self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) |
| |
|
|
| if config.attention_mode == 'n2': |
| pass |
| else: |
| for i, layer in enumerate(self.model.encoder.layers): |
| layer.self_attn = LongformerSelfAttentionForMBart(config, layer_id=i) |
| |
| self.post_init() |
|
|
|
|
| class MLongformerEncoderDecoderConfig(MBartConfig): |
| def __init__(self, attention_window: List[int] = None, attention_dilation: List[int] = None, |
| autoregressive: bool = False, attention_mode: str = 'sliding_chunks', |
| gradient_checkpointing: bool = False, **kwargs): |
| """ |
| Args: |
| attention_window: list of attention window sizes of length = number of layers. |
| window size = number of attention locations on each side. |
| For an affective window size of 512, use `attention_window=[256]*num_layers` |
| which is 256 on each side. |
| attention_dilation: list of attention dilation of length = number of layers. |
| attention dilation of `1` means no dilation. |
| autoregressive: do autoregressive attention or have attention of both sides |
| attention_mode: 'n2' for regular n^2 self-attention, 'tvm' for TVM implemenation of Longformer |
| selfattention, 'sliding_chunks' for another implementation of Longformer selfattention |
| """ |
| super().__init__(**kwargs) |
| self.attention_window = attention_window |
| self.attention_dilation = attention_dilation |
| self.autoregressive = autoregressive |
| self.attention_mode = attention_mode |
| self.gradient_checkpointing = gradient_checkpointing |
| assert self.attention_mode in ['sliding_chunks', 'n2'] |
|
|
| class LongformerSelfAttentionForMBart(nn.Module): |
| def __init__(self, config, layer_id): |
| super().__init__() |
| self.embed_dim = config.d_model |
| self.longformer_self_attn = LongformerSelfAttention(config, layer_id=layer_id) |
| self.output = nn.Linear(self.embed_dim, self.embed_dim) |
|
|
| def forward( |
| self, |
| hidden_states: Tensor, |
| key_value_states: Optional[Tensor] = None, |
| past_key_value: Optional[Tuple[Tensor]] = None, |
| attention_mask: Optional[Tensor] = None, |
| layer_head_mask: Optional[Tensor] = None, |
| output_attentions: bool = False |
| ) -> Tuple[Tensor, Optional[Tensor]]: |
|
|
| bsz, tgt_len, embed_dim = hidden_states.size() |
| assert embed_dim == self.embed_dim |
| assert list(hidden_states.size()) == [bsz, tgt_len, embed_dim] |
|
|
| outputs = self.longformer_self_attn( |
| hidden_states, |
| # flip sign: 0=local, -1=global, 1=padding becomes 0=local, +1=global, negative=masked, as LongformerSelfAttention expects |
| attention_mask=attention_mask * -1, |
| head_mask=None, |
| encoder_hidden_states=None, |
| encoder_attention_mask=None, |
| output_attentions=output_attentions, |
| ) |
|
|
| |
| attn_output = self.output(outputs[0]) |
| |
| return (attn_output, outputs[1:], None) if len(outputs) == 2 else (attn_output, None, None) |
|
|
|
|
| class LongMBartEncoder(MBartEncoder): |
| """ |
| Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a |
| [`MBartEncoderLayer`]. |
| |
| Args: |
| config: MBartConfig |
| embed_tokens (nn.Embedding): input token embeddings |
| """ |
|
|
| def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = None): |
| super().__init__(config) |
|
|
| self.dropout = config.dropout |
| self.layerdrop = config.encoder_layerdrop |
|
|
| embed_dim = config.d_model |
| self.padding_idx = config.pad_token_id |
| self.max_source_positions = config.max_encoder_position_embeddings |
| self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 |
|
|
| if embed_tokens is not None: |
| self.embed_tokens = embed_tokens |
| else: |
| self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) |
|
|
| self.embed_positions = MBartLearnedPositionalEmbedding( |
| self.max_source_positions, |
| embed_dim, |
| ) |
| self.layers = nn.ModuleList([LongMBartEncoderLayer(config) for _ in range(config.encoder_layers)]) |
| self.layernorm_embedding = nn.LayerNorm(embed_dim) |
| self.layer_norm = nn.LayerNorm(config.d_model) |
|
|
| self.gradient_checkpointing = False |
| |
| self.post_init() |
|
|
| def forward( |
| self, |
| input_ids: torch.LongTensor = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, BaseModelOutput]: |
| r""" |
| Args: |
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you |
| provide it. |
| |
| Indices can be obtained using [`MBartTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| [What are attention masks?](../glossary#attention-mask) |
| head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): |
| Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| |
| inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): |
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. |
| This is useful if you want more control over how to convert `input_ids` indices into associated vectors |
| than the model's internal embedding lookup matrix. |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| returned tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors |
| for more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| """ |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| |
| if input_ids is not None and inputs_embeds is not None: |
| raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") |
| elif input_ids is not None: |
| input = input_ids |
| input_shape = input.shape |
| input_ids = input_ids.view(-1, input_shape[-1]) |
| elif inputs_embeds is not None: |
| input = inputs_embeds[:, :, -1] |
| else: |
| raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale |
|
|
| embed_pos = self.embed_positions(input) |
|
|
| hidden_states = inputs_embeds + embed_pos |
| hidden_states = self.layernorm_embedding(hidden_states) |
| hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) |
|
|
| |
| longformer_attention_mask = None |
| if attention_mask is not None: |
| # convert the HF-style mask (1=attend, 0=pad, 2=global attention) to the Longformer |
| # convention used by the encoder layers: 0=local, 1=padding, -1=global |
| longformer_attention_mask = 1 - attention_mask |
| # expand the standard mask to the 4D additive form used by the regular (n2) attention path |
| attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) |
|
|
|
|
| encoder_states = () if output_hidden_states else None |
| all_attentions = () if output_attentions else None |
|
|
| |
| if head_mask is not None: |
| if head_mask.size()[0] != len(self.layers): |
| raise ValueError( |
| f"The head_mask should be specified for {len(self.layers)} layers, but it is for" |
| f" {head_mask.size()[0]}." |
| ) |
| for idx, encoder_layer in enumerate(self.layers): |
| if output_hidden_states: |
| encoder_states = encoder_states + (hidden_states,) |
| |
| dropout_probability = random.uniform(0, 1) |
| if self.training and (dropout_probability < self.layerdrop): |
| layer_outputs = (None, None) |
| else: |
| if self.gradient_checkpointing and self.training: |
|
|
| def create_custom_forward(module): |
| def custom_forward(*inputs): |
| return module(*inputs, output_attentions) |
|
|
| return custom_forward |
|
|
| layer_outputs = torch.utils.checkpoint.checkpoint( |
| create_custom_forward(encoder_layer), |
| hidden_states, |
| attention_mask, |
| longformer_attention_mask, |
| (head_mask[idx] if head_mask is not None else None), |
| ) |
| else: |
| layer_outputs = encoder_layer( |
| hidden_states, |
| attention_mask, |
| longformer_attention_mask, |
| layer_head_mask=(head_mask[idx] if head_mask is not None else None), |
| output_attentions=output_attentions, |
| ) |
|
|
| hidden_states = layer_outputs[0] |
|
|
| if output_attentions: |
| all_attentions = all_attentions + (layer_outputs[1],) |
|
|
| hidden_states = self.layer_norm(hidden_states) |
| |
|
|
| if output_hidden_states: |
| encoder_states = encoder_states + (hidden_states,) |
|
|
| if not return_dict: |
| return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) |
| return BaseModelOutput( |
| last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions |
| ) |
|
|
|
|
| class LongMBartModel(MBartModel): |
| def __init__(self, config: MBartConfig): |
| super().__init__(config) |
|
|
| padding_idx, vocab_size = config.pad_token_id, config.vocab_size |
| self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) |
|
|
| self.encoder = LongMBartEncoder(config, self.shared) |
| self.decoder = MBartDecoder(config, self.shared) |
|
|
| |
| self.post_init() |
|
|
| def forward( |
| self, |
| input_ids: torch.LongTensor = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| decoder_input_ids: Optional[torch.LongTensor] = None, |
| decoder_attention_mask: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| decoder_head_mask: Optional[torch.Tensor] = None, |
| cross_attn_head_mask: Optional[torch.Tensor] = None, |
| encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| decoder_inputs_embeds: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Seq2SeqModelOutput, Tuple[torch.FloatTensor]]: |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| |
| |
| if decoder_input_ids is None and decoder_inputs_embeds is None: |
| decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id) |
|
|
| if encoder_outputs is None: |
| encoder_outputs = self.encoder( |
| input_ids=input_ids, |
| attention_mask=attention_mask, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| |
| elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): |
| encoder_outputs = BaseModelOutput( |
| last_hidden_state=encoder_outputs[0], |
| hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, |
| attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, |
| ) |
|
|
| |
| decoder_outputs = self.decoder( |
| input_ids=decoder_input_ids, |
| attention_mask=decoder_attention_mask, |
| encoder_hidden_states=encoder_outputs[0], |
| encoder_attention_mask=attention_mask, |
| head_mask=decoder_head_mask, |
| cross_attn_head_mask=cross_attn_head_mask, |
| past_key_values=past_key_values, |
| inputs_embeds=decoder_inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| if not return_dict: |
| return decoder_outputs + encoder_outputs |
|
|
| return Seq2SeqModelOutput( |
| last_hidden_state=decoder_outputs.last_hidden_state, |
| past_key_values=decoder_outputs.past_key_values, |
| decoder_hidden_states=decoder_outputs.hidden_states, |
| decoder_attentions=decoder_outputs.attentions, |
| cross_attentions=decoder_outputs.cross_attentions, |
| encoder_last_hidden_state=encoder_outputs.last_hidden_state, |
| encoder_hidden_states=encoder_outputs.hidden_states, |
| encoder_attentions=encoder_outputs.attentions, |
| ) |
|
|
| class LongMBartEncoderLayer(MBartEncoderLayer): |
| def __init__(self, config: MBartConfig): |
| super().__init__(config) |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| attention_mask: torch.Tensor, |
| longformer_attention_mask: torch.Tensor, |
| layer_head_mask: torch.Tensor, |
| output_attentions: bool = False, |
| ) -> torch.Tensor: |
| """ |
| Args: |
| hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)* |
| attention_mask (`torch.FloatTensor`): attention mask of size |
| *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. |
| longformer_attention_mask (`torch.FloatTensor`): attention mask of size |
| *(batch, src_len)* where 0=local, -1=global, 1=padding. |
| layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size |
| *(encoder_attention_heads,)*. |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| returned tensors for more detail. |
| """ |
| |
| if isinstance(self.self_attn, LongformerSelfAttentionForMBart): |
| attention_mask = longformer_attention_mask |
| residual = hidden_states |
| hidden_states = self.self_attn_layer_norm(hidden_states) |
| hidden_states, attn_weights, _ = self.self_attn( |
| hidden_states=hidden_states, |
| attention_mask=attention_mask, |
| layer_head_mask=layer_head_mask, |
| output_attentions=output_attentions, |
| ) |
| hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) |
| hidden_states = residual + hidden_states |
|
|
| residual = hidden_states |
| hidden_states = self.final_layer_norm(hidden_states) |
| hidden_states = self.activation_fn(self.fc1(hidden_states)) |
| hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) |
| hidden_states = self.fc2(hidden_states) |
| hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) |
| hidden_states = residual + hidden_states |
|
|
| if hidden_states.dtype == torch.float16 and ( |
| torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() |
| ): |
| clamp_value = torch.finfo(hidden_states.dtype).max - 1000 |
| hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) |
|
|
| outputs = (hidden_states,) |
|
|
| if output_attentions: |
| outputs += (attn_weights,) |
|
|
| return outputs |
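|
|
| # A hedged illustration of the mask convention documented above: starting from the usual HF-style |
| # encoder mask (1 = attend, 0 = padding, 2 = global attention), `LongMBartEncoder.forward` computes |
| # `1 - attention_mask`, which yields exactly the 0=local / -1=global / 1=padding values this layer |
| # expects. `_example_longformer_mask` is an illustrative helper and is not used by the model code. |
| def _example_longformer_mask(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor: |
|     attention_mask = (input_ids != pad_token_id).long()  # 1 = attend, 0 = padding |
|     attention_mask[:, 0] = 2  # mark the first token for global attention |
|     return 1 - attention_mask  # 0 = local, -1 = global, 1 = padding |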
| |
| class Longformer(RobertaModel): |
| def __init__(self, config): |
| super(Longformer, self).__init__(config) |
| if config.attention_mode == 'n2': |
| pass |
| else: |
| for i, layer in enumerate(self.encoder.layer): |
| layer.attention.self = LongformerSelfAttention(config, layer_id=i) |
|
|
|
|
| class LongformerForMaskedLM(RobertaForMaskedLM): |
| def __init__(self, config): |
| super(LongformerForMaskedLM, self).__init__(config) |
| if config.attention_mode == 'n2': |
| pass |
| else: |
| for i, layer in enumerate(self.roberta.encoder.layer): |
| layer.attention.self = LongformerSelfAttention(config, layer_id=i) |
|
|
|
|
| class LongformerConfig(RobertaConfig): |
| def __init__(self, attention_window: List[int] = None, attention_dilation: List[int] = None, |
| autoregressive: bool = False, attention_mode: str = 'sliding_chunks', **kwargs): |
| """ |
| Args: |
| attention_window: list of attention window sizes of length = number of layers. |
| window size = number of attention locations on each side. |
| For an affective window size of 512, use `attention_window=[256]*num_layers` |
| which is 256 on each side. |
| attention_dilation: list of attention dilation of length = number of layers. |
| attention dilation of `1` means no dilation. |
| autoregressive: do autoregressive attention or have attention of both sides |
| attention_mode: 'n2' for regular n^2 self-attention, 'tvm' for TVM implemenation of Longformer |
| selfattention, 'sliding_chunks' for another implementation of Longformer selfattention |
| """ |
| super().__init__(**kwargs) |
| self.attention_window = attention_window |
| self.attention_dilation = attention_dilation |
| self.autoregressive = autoregressive |
| self.attention_mode = attention_mode |
| assert self.attention_mode in ['sliding_chunks', 'n2', 'sliding_chunks_no_overlap'] |
|
|
|
|
| class LongformerSelfAttention(nn.Module): |
| def __init__(self, config, layer_id): |
| super(LongformerSelfAttention, self).__init__() |
| if config.hidden_size % config.num_attention_heads != 0: |
| raise ValueError( |
| "The hidden size (%d) is not a multiple of the number of attention " |
| "heads (%d)" % (config.hidden_size, config.num_attention_heads)) |
| self.num_heads = config.num_attention_heads |
| self.head_dim = int(config.hidden_size / config.num_attention_heads) |
| self.embed_dim = config.hidden_size |
|
|
| self.query = nn.Linear(config.hidden_size, self.embed_dim) |
| self.key = nn.Linear(config.hidden_size, self.embed_dim) |
| self.value = nn.Linear(config.hidden_size, self.embed_dim) |
|
|
| self.query_global = nn.Linear(config.hidden_size, self.embed_dim) |
| self.key_global = nn.Linear(config.hidden_size, self.embed_dim) |
| self.value_global = nn.Linear(config.hidden_size, self.embed_dim) |
|
|
| self.dropout = config.attention_probs_dropout_prob |
|
|
| self.layer_id = layer_id |
| self.attention_window = config.attention_window[self.layer_id] |
| self.attention_dilation = config.attention_dilation[self.layer_id] |
| self.attention_mode = config.attention_mode |
| self.autoregressive = config.autoregressive |
| assert self.attention_window > 0 |
| assert self.attention_dilation > 0 |
| assert self.attention_mode in ['sliding_chunks', 'sliding_chunks_no_overlap'] |
| if self.attention_mode in ['sliding_chunks', 'sliding_chunks_no_overlap']: |
| assert not self.autoregressive |
| assert self.attention_dilation == 1 |
|
|
| def forward( |
| self, |
| hidden_states, |
| attention_mask=None, |
| head_mask=None, |
| encoder_hidden_states=None, |
| encoder_attention_mask=None, |
| output_attentions=False, |
| ): |
| ''' |
| The `attention_mask` passed to this module (after conversion in the calling code) uses: |
| -ve: no attention |
| 0: local attention |
| +ve: global attention |
| ''' |
| assert encoder_hidden_states is None, "`encoder_hidden_states` is not supported and should be None" |
| assert encoder_attention_mask is None, "`encoder_attention_mask` is not supported and should be None" |
|
|
| if attention_mask is not None: |
| key_padding_mask = attention_mask < 0 |
| extra_attention_mask = attention_mask > 0 |
| remove_from_windowed_attention_mask = attention_mask != 0 |
|
|
| num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1) |
| max_num_extra_indices_per_batch = num_extra_indices_per_batch.max() |
| if max_num_extra_indices_per_batch <= 0: |
| extra_attention_mask = None |
| else: |
| |
| |
| |
| |
| # indices of the tokens with global attention |
| extra_attention_mask_nonzeros = extra_attention_mask.nonzero(as_tuple=True) |
| zero_to_max_range = torch.arange(0, max_num_extra_indices_per_batch, |
| device=num_extra_indices_per_batch.device) |
| # which of the max_num_extra_indices_per_batch slots are valid for each example |
| selection_padding_mask = zero_to_max_range < num_extra_indices_per_batch.unsqueeze(dim=-1) |
| # 1) locations of the non-padding values among the selected global-attention slots |
| selection_padding_mask_nonzeros = selection_padding_mask.nonzero(as_tuple=True) |
| # 2) locations of the padding values among the selected global-attention slots |
| selection_padding_mask_zeros = (selection_padding_mask == 0).nonzero(as_tuple=True) |
| else: |
| remove_from_windowed_attention_mask = None |
| extra_attention_mask = None |
| key_padding_mask = None |
|
|
| hidden_states = hidden_states.transpose(0, 1) |
| seq_len, bsz, embed_dim = hidden_states.size() |
| assert embed_dim == self.embed_dim |
| q = self.query(hidden_states) |
| k = self.key(hidden_states) |
| v = self.value(hidden_states) |
| q /= math.sqrt(self.head_dim) |
|
|
| q = q.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1) |
| k = k.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1) |
| |
| if self.attention_mode == "sliding_chunks": |
| attn_weights = sliding_chunks_matmul_qk(q, k, self.attention_window, padding_value=0) |
| elif self.attention_mode == "sliding_chunks_no_overlap": |
| attn_weights = sliding_chunks_no_overlap_matmul_qk(q, k, self.attention_window, padding_value=0) |
| else: |
| raise ValueError(f"Unsupported attention mode: {self.attention_mode}") |
| mask_invalid_locations(attn_weights, self.attention_window, self.attention_dilation, False) |
| if remove_from_windowed_attention_mask is not None: |
| |
| |
| remove_from_windowed_attention_mask = remove_from_windowed_attention_mask.unsqueeze(dim=-1).unsqueeze(dim=-1) |
| |
| float_mask = remove_from_windowed_attention_mask.type_as(q).masked_fill(remove_from_windowed_attention_mask, -10000.0) |
| repeat_size = 1 if isinstance(self.attention_dilation, int) else len(self.attention_dilation) |
| float_mask = float_mask.repeat(1, 1, repeat_size, 1) |
| ones = float_mask.new_ones(size=float_mask.size()) |
| |
| if self.attention_mode == "sliding_chunks": |
| d_mask = sliding_chunks_matmul_qk(ones, float_mask, self.attention_window, padding_value=0) |
| elif self.attention_mode == "sliding_chunks_no_overlap": |
| d_mask = sliding_chunks_no_overlap_matmul_qk(ones, float_mask, self.attention_window, padding_value=0) |
|
|
| attn_weights += d_mask |
| assert list(attn_weights.size())[:3] == [bsz, seq_len, self.num_heads] |
| assert attn_weights.size(dim=3) in [self.attention_window * 2 + 1, self.attention_window * 3] |
|
|
| |
| # attention from every token to the global-attention tokens |
| if extra_attention_mask is not None: |
| selected_k = k.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_heads, self.head_dim) |
| selected_k[selection_padding_mask_nonzeros] = k[extra_attention_mask_nonzeros] |
| # (bsz, seq_len, num_heads, max_num_extra_indices_per_batch) |
| selected_attn_weights = torch.einsum('blhd,bshd->blhs', (q, selected_k)) |
| selected_attn_weights[selection_padding_mask_zeros[0], :, :, selection_padding_mask_zeros[1]] = -10000 |
| |
| |
| attn_weights = torch.cat((selected_attn_weights, attn_weights), dim=-1) |
| attn_weights_float = F.softmax(attn_weights, dim=-1, dtype=torch.float32) |
| if key_padding_mask is not None: |
| |
| attn_weights_float = torch.masked_fill(attn_weights_float, key_padding_mask.unsqueeze(-1).unsqueeze(-1), 0.0) |
| attn_weights = attn_weights_float.type_as(attn_weights) |
| attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training) |
| v = v.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1) |
| attn = 0 |
| if extra_attention_mask is not None: |
| selected_attn_probs = attn_probs.narrow(-1, 0, max_num_extra_indices_per_batch) |
| selected_v = v.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_heads, self.head_dim) |
| selected_v[selection_padding_mask_nonzeros] = v[extra_attention_mask_nonzeros] |
| |
| |
| attn = torch.matmul(selected_attn_probs.transpose(1, 2), selected_v.transpose(1, 2).type_as(selected_attn_probs)).transpose(1, 2) |
| attn_probs = attn_probs.narrow(-1, max_num_extra_indices_per_batch, attn_probs.size(-1) - max_num_extra_indices_per_batch).contiguous() |
|
|
| if self.attention_mode == "sliding_chunks": |
| attn += sliding_chunks_matmul_pv(attn_probs, v, self.attention_window) |
| elif self.attention_mode == "sliding_chunks_no_overlap": |
| attn += sliding_chunks_no_overlap_matmul_pv(attn_probs, v, self.attention_window) |
| else: |
| raise ValueError(f"Unsupported attention mode: {self.attention_mode}") |
|
|
| attn = attn.type_as(hidden_states) |
| assert list(attn.size()) == [bsz, seq_len, self.num_heads, self.head_dim] |
| attn = attn.transpose(0, 1).reshape(seq_len, bsz, embed_dim).contiguous() |
|
|
| |
| |
| if extra_attention_mask is not None: |
| selected_hidden_states = hidden_states.new_zeros(max_num_extra_indices_per_batch, bsz, embed_dim) |
| selected_hidden_states[selection_padding_mask_nonzeros[::-1]] = hidden_states[extra_attention_mask_nonzeros[::-1]] |
|
|
| q = self.query_global(selected_hidden_states) |
| k = self.key_global(hidden_states) |
| v = self.value_global(hidden_states) |
| q /= math.sqrt(self.head_dim) |
|
|
| q = q.contiguous().view(max_num_extra_indices_per_batch, bsz * self.num_heads, self.head_dim).transpose(0, 1) |
| k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) |
| v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) |
| attn_weights = torch.bmm(q, k.transpose(1, 2)) |
| assert list(attn_weights.size()) == [bsz * self.num_heads, max_num_extra_indices_per_batch, seq_len] |
|
|
| attn_weights = attn_weights.view(bsz, self.num_heads, max_num_extra_indices_per_batch, seq_len) |
| attn_weights[selection_padding_mask_zeros[0], :, selection_padding_mask_zeros[1], :] = -10000.0 |
| if key_padding_mask is not None: |
| attn_weights = attn_weights.masked_fill( |
| key_padding_mask.unsqueeze(1).unsqueeze(2), |
| -10000.0, |
| ) |
| attn_weights = attn_weights.view(bsz * self.num_heads, max_num_extra_indices_per_batch, seq_len) |
| attn_weights_float = F.softmax(attn_weights, dim=-1, dtype=torch.float32) |
| attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training) |
| selected_attn = torch.bmm(attn_probs, v) |
| assert list(selected_attn.size()) == [bsz * self.num_heads, max_num_extra_indices_per_batch, self.head_dim] |
|
|
| selected_attn_4d = selected_attn.view(bsz, self.num_heads, max_num_extra_indices_per_batch, self.head_dim) |
| nonzero_selected_attn = selected_attn_4d[selection_padding_mask_nonzeros[0], :, selection_padding_mask_nonzeros[1]] |
| attn[extra_attention_mask_nonzeros[::-1]] = nonzero_selected_attn.view(len(selection_padding_mask_nonzeros[0]), -1).type_as(hidden_states) |
|
|
| context_layer = attn.transpose(0, 1) |
| if output_attentions: |
| if extra_attention_mask is not None: |
| |
| |
| |
| |
| |
| |
| # with global attention, return only the attention weights of the global tokens to all tokens, |
| # of shape (bsz, num_heads, max_num_extra_indices_per_batch, seq_len) |
| attn_weights = attn_weights.view(bsz, self.num_heads, max_num_extra_indices_per_batch, seq_len) |
| else: |
| # without global attention, return the local attention weights, |
| # of shape (bsz, num_heads, seq_len, window_size) |
| attn_weights = attn_weights.permute(0, 2, 1, 3) |
| outputs = (context_layer, attn_weights) if output_attentions else (context_layer,) |
| return outputs |
| |
| def _skew(x, direction, padding_value): |
| '''Convert diagonals into columns (or columns into diagonals, depending on `direction`)''' |
| x_padded = F.pad(x, direction, value=padding_value) |
| x_padded = x_padded.view(*x_padded.size()[:-2], x_padded.size(-1), x_padded.size(-2)) |
| return x_padded |
|
|
|
|
| def _skew2(x, padding_value): |
| '''shift every row 1 step to right converting columns into diagonals''' |
| |
| B, C, M, L = x.size() |
| x = F.pad(x, (0, M + 1), value=padding_value) |
| x = x.view(B, C, -1) |
| x = x[:, :, :-M] |
| x = x.view(B, C, M, M + L) |
| x = x[:, :, :, :-1] |
| return x |
|
|
|
|
| def _chunk(x, w): |
| '''convert into overlapping chunkings. Chunk size = 2w, overlap size = w''' |
|
|
| |
| x = x.view(x.size(0), x.size(1) // (w * 2), w * 2, x.size(2)) |
|
|
| |
| chunk_size = list(x.size()) |
| chunk_size[1] = chunk_size[1] * 2 - 1 |
|
|
| chunk_stride = list(x.stride()) |
| chunk_stride[1] = chunk_stride[1] // 2 |
| return x.as_strided(size=chunk_size, stride=chunk_stride) |
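|
|
| # A shape-only sketch of `_chunk` on toy sizes (assumed, for illustration): with bsz*heads=2, |
| # seqlen=8, head_dim=4 and w=2, the input is re-strided into seqlen//w - 1 = 3 overlapping chunks |
| # of length 2w = 4 each, overlapping by w. `_example_chunk_shapes` is not used by the model code. |
| def _example_chunk_shapes(): |
|     x = torch.arange(2 * 8 * 4, dtype=torch.float32).view(2, 8, 4) |
|     chunks = _chunk(x, w=2) |
|     return chunks.shape  # torch.Size([2, 3, 4, 4]) |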
|
|
|
|
| def sliding_chunks_matmul_qk(q: torch.Tensor, k: torch.Tensor, w: int, padding_value: float): |
| '''Matrix multiplication of query and key tensors using a sliding window attention pattern. |
| This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for a pretrained Longformer) |
| with an overlap of size w''' |
| bsz, seqlen, num_heads, head_dim = q.size() |
| assert seqlen % (w * 2) == 0 |
| assert q.size() == k.size() |
|
|
| chunks_count = seqlen // w - 1 |
|
|
| |
| q = q.transpose(1, 2).reshape(bsz * num_heads, seqlen, head_dim) |
| k = k.transpose(1, 2).reshape(bsz * num_heads, seqlen, head_dim) |
|
|
| chunk_q = _chunk(q, w) |
| chunk_k = _chunk(k, w) |
|
|
| |
| |
| |
| |
| chunk_attn = torch.einsum('bcxd,bcyd->bcxy', (chunk_q, chunk_k)) |
|
|
| |
| diagonal_chunk_attn = _skew(chunk_attn, direction=(0, 0, 0, 1), padding_value=padding_value) |
|
|
| |
| |
| |
| |
|
|
| diagonal_attn = diagonal_chunk_attn.new_empty((bsz * num_heads, chunks_count + 1, w, w * 2 + 1)) |
|
|
| |
| |
| # copy parts of diagonal_chunk_attn into the combined attention matrix: the main diagonal and the upper triangle |
| diagonal_attn[:, :-1, :, w:] = diagonal_chunk_attn[:, :, :w, :w + 1] |
| diagonal_attn[:, -1, :, w:] = diagonal_chunk_attn[:, -1, w:, :w + 1] |
| # the lower triangle |
| diagonal_attn[:, 1:, :, :w] = diagonal_chunk_attn[:, :, -(w + 1):-1, w + 1:] |
| diagonal_attn[:, 0, 1:w, 1:w] = diagonal_chunk_attn[:, 0, :w - 1, 1 - w:] |
|
|
| |
| diagonal_attn = diagonal_attn.view(bsz, num_heads, seqlen, 2 * w + 1).transpose(2, 1) |
|
|
| mask_invalid_locations(diagonal_attn, w, 1, False) |
| return diagonal_attn |
|
|
|
|
| def sliding_chunks_matmul_pv(prob: torch.Tensor, v: torch.Tensor, w: int): |
| '''Same as sliding_chunks_matmul_qk but for prob and value tensors. Expects the same output |
| format as sliding_chunks_matmul_qk''' |
| bsz, seqlen, num_heads, head_dim = v.size() |
| assert seqlen % (w * 2) == 0 |
| assert prob.size()[:3] == v.size()[:3] |
| assert prob.size(3) == 2 * w + 1 |
| chunks_count = seqlen // w - 1 |
| |
| chunk_prob = prob.transpose(1, 2).reshape(bsz * num_heads, seqlen // w, w, 2 * w + 1) |
|
|
| |
| v = v.transpose(1, 2).reshape(bsz * num_heads, seqlen, head_dim) |
|
|
| |
| padded_v = F.pad(v, (0, 0, w, w), value=-1) |
|
|
| |
| chunk_v_size = (bsz * num_heads, chunks_count + 1, 3 * w, head_dim) |
| chunk_v_stride = padded_v.stride() |
| chunk_v_stride = chunk_v_stride[0], w * chunk_v_stride[1], chunk_v_stride[1], chunk_v_stride[2] |
| chunk_v = padded_v.as_strided(size=chunk_v_size, stride=chunk_v_stride) |
|
|
| skewed_prob = _skew2(chunk_prob, padding_value=0) |
|
|
| context = torch.einsum('bcwd,bcdh->bcwh', (skewed_prob, chunk_v)) |
| return context.view(bsz, num_heads, seqlen, head_dim).transpose(1, 2) |
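|
|
| # A minimal end-to-end sketch (toy dimensions, illustration only) of how the two sliding-chunk kernels |
| # fit together: sliding_chunks_matmul_qk produces banded scores of shape (bsz, seqlen, heads, 2w+1), |
| # a softmax over the last dimension turns them into probabilities, and sliding_chunks_matmul_pv maps |
| # them back to per-token context vectors. LongformerSelfAttention above adds masking, dropout and |
| # global attention on top of this core. `_example_sliding_chunks` is not part of the original code. |
| def _example_sliding_chunks(bsz: int = 1, seqlen: int = 8, heads: int = 2, head_dim: int = 4, w: int = 2): |
|     q = torch.randn(bsz, seqlen, heads, head_dim) |
|     k = torch.randn(bsz, seqlen, heads, head_dim) |
|     v = torch.randn(bsz, seqlen, heads, head_dim) |
|     attn_weights = sliding_chunks_matmul_qk(q, k, w, padding_value=0)  # (bsz, seqlen, heads, 2w+1) |
|     attn_probs = F.softmax(attn_weights, dim=-1) |
|     return sliding_chunks_matmul_pv(attn_probs, v, w)  # (bsz, seqlen, heads, head_dim) |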
|
|
|
|
| def pad_to_window_size(input_ids: torch.Tensor, attention_mask: torch.Tensor, |
| one_sided_window_size: int, pad_token_id: int): |
| '''A helper function to pad tokens and mask to work with the sliding_chunks implementation of Longformer self-attention. |
| Input: |
| input_ids = torch.Tensor(bsz x seqlen): ids of wordpieces |
| attention_mask = torch.Tensor(bsz x seqlen): attention mask |
| one_sided_window_size = int: window size on one side of each token |
| pad_token_id = int: tokenizer.pad_token_id |
| Returns |
| (input_ids, attention_mask) padded to length divisible by 2 * one_sided_window_size |
| ''' |
| w = int(2 * one_sided_window_size) |
| seqlen = input_ids.size(1) |
| padding_len = (w - seqlen % w) % w |
| input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id) |
| attention_mask = F.pad(attention_mask, (0, padding_len), value=False) |
| return input_ids, attention_mask |
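|
|
| # A hedged usage sketch for pad_to_window_size: the pad id 1 below is an assumption purely for |
| # illustration (use your tokenizer's pad_token_id). A sequence of length 6 is padded to 8, the next |
| # multiple of 2 * one_sided_window_size, and the first token is additionally marked for global |
| # attention (value 2), matching the mask convention used by the long encoder above. |
| def _example_pad_and_mask(): |
|     pad_token_id = 1  # illustrative assumption |
|     input_ids = torch.tensor([[5, 6, 7, 8, 9, 10]]) |
|     attention_mask = torch.ones_like(input_ids) |
|     attention_mask[:, 0] = 2  # global attention on the first token |
|     input_ids, attention_mask = pad_to_window_size(input_ids, attention_mask, 4, pad_token_id) |
|     return input_ids, attention_mask  # both of shape (1, 8); padded positions have mask value 0 |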
|
|
|
|
|
|
|
|
| def sliding_chunks_no_overlap_matmul_qk(q: torch.Tensor, k: torch.Tensor, w: int, padding_value: float): |
| bsz, seqlen, num_heads, head_dim = q.size() |
| assert seqlen % w == 0 |
| assert q.size() == k.size() |
| |
| chunk_q = q.view(bsz, seqlen // w, w, num_heads, head_dim) |
| chunk_k = k.view(bsz, seqlen // w, w, num_heads, head_dim) |
| chunk_k_expanded = torch.stack(( |
| F.pad(chunk_k[:, :-1], (0, 0, 0, 0, 0, 0, 1, 0), value=0.0), |
| chunk_k, |
| F.pad(chunk_k[:, 1:], (0, 0, 0, 0, 0, 0, 0, 1), value=0.0), |
| ), dim=-1) |
| diagonal_attn = torch.einsum('bcxhd,bcyhde->bcxhey', (chunk_q, chunk_k_expanded)) |
| return diagonal_attn.reshape(bsz, seqlen, num_heads, 3 * w) |
|
|
|
|
| def sliding_chunks_no_overlap_matmul_pv(prob: torch.Tensor, v: torch.Tensor, w: int): |
| bsz, seqlen, num_heads, head_dim = v.size() |
| chunk_prob = prob.view(bsz, seqlen // w, w, num_heads, 3, w) |
| chunk_v = v.view(bsz, seqlen // w, w, num_heads, head_dim) |
| chunk_v_extended = torch.stack(( |
| F.pad(chunk_v[:, :-1], (0, 0, 0, 0, 0, 0, 1, 0), value=0.0), |
| chunk_v, |
| F.pad(chunk_v[:, 1:], (0, 0, 0, 0, 0, 0, 0, 1), value=0.0), |
| ), dim=-1) |
| context = torch.einsum('bcwhpd,bcdhep->bcwhe', (chunk_prob, chunk_v_extended)) |
| return context.reshape(bsz, seqlen, num_heads, head_dim) |
|
|
| def _get_invalid_locations_mask_fixed_dilation(seq_len: int, w: int, d: int): |
| diagonals_list = [] |
| for j in range(-d * w, d, d): |
| diagonal_mask = torch.zeros(seq_len, device='cpu', dtype=torch.uint8) |
| diagonal_mask[:-j] = 1 |
| diagonals_list.append(diagonal_mask) |
| return torch.stack(diagonals_list, dim=-1) |
|
|
| @lru_cache() |
| def _get_invalid_locations_mask(w: int, d: Union[torch.Tensor,int], autoregressive: bool, device: str): |
| if isinstance(d, int): |
| affected_seq_len = w * d |
| mask = _get_invalid_locations_mask_fixed_dilation(affected_seq_len, w, d) |
| mask = mask[None, :, None, :] |
| else: |
| affected_seq_len = w * d.max() |
| head_masks = [] |
| d_list = d.cpu().numpy().tolist() |
| for d in d_list: |
| one_head_mask = _get_invalid_locations_mask_fixed_dilation(affected_seq_len, w, d) |
| head_masks.append(one_head_mask) |
| mask = torch.stack(head_masks, dim=-2) |
| mask = mask[None, :, :, :] |
|
|
| ending_mask = None if autoregressive else mask.flip(dims=(1, 3)).bool().to(device) |
| return affected_seq_len, mask.bool().to(device), ending_mask |
|
|
| def mask_invalid_locations(input_tensor: torch.Tensor, w: int, d: Union[torch.Tensor, int], autoregressive: bool) -> torch.Tensor: |
| affected_seq_len, beginning_mask, ending_mask = _get_invalid_locations_mask(w, d, autoregressive, input_tensor.device) |
| seq_len = input_tensor.size(1) |
| beginning_input = input_tensor[:, :affected_seq_len, :, :w+1] |
| beginning_mask = beginning_mask[:, :seq_len].expand(beginning_input.size()) |
| beginning_input.masked_fill_(beginning_mask, -float('inf')) |
| if not autoregressive: |
| ending_input = input_tensor[:, -affected_seq_len:, :, -(w+1):] |
| ending_mask = ending_mask[:, -seq_len:].expand(ending_input.size()) |
| ending_input.masked_fill_(ending_mask, -float('inf')) |
|
|