| """ PyTorch Whisper model.""" |
| import math |
| from typing import Optional, Tuple, Union |
| from transformers.utils import ModelOutput |
| import numpy as np |
| import torch |
| import torch.nn.functional as F |
| import torch.utils.checkpoint |
| from torch import nn |
| from torch.nn import CrossEntropyLoss |
|
|
| from transformers.activations import ACT2FN |
| from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa |
| from transformers.modeling_outputs import ( |
| BaseModelOutput, |
| BaseModelOutputWithPastAndCrossAttentions, |
| CausalLMOutputWithCrossAttentions, |
| Seq2SeqLMOutput, |
| Seq2SeqModelOutput, |
| SequenceClassifierOutput, |
| ) |
| from transformers.modeling_utils import PreTrainedModel |
| from transformers.utils import ( |
| add_start_docstrings, |
| add_start_docstrings_to_model_forward, |
| is_flash_attn_2_available, |
| is_flash_attn_greater_or_equal_2_10, |
| logging, |
| replace_return_docstrings, |
| ) |
| from .configuration_whisper import WhisperConfig |
| from transformers.models.whisper.generation_whisper import WhisperGenerationMixin |
|
|
|
|
| if is_flash_attn_2_available(): |
| from flash_attn import flash_attn_func, flash_attn_varlen_func |
| from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input |
|
|
|
|
| logger = logging.get_logger(__name__) |
|
|
| _HIDDEN_STATES_START_POSITION = 1 |
|
|
| _CONFIG_FOR_DOC = "WhisperConfig" |
| _CHECKPOINT_FOR_DOC = "openai/whisper-tiny" |
|
|
|
|
| |
| def _get_unpad_data(attention_mask): |
| seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) |
| indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() |
| max_seqlen_in_batch = seqlens_in_batch.max().item() |
| cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) |
| return ( |
| indices, |
| cu_seqlens, |
| max_seqlen_in_batch, |
| ) |
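
# Worked example (illustrative only, not executed at import): for a right-padded mask
#   mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
# _get_unpad_data(mask) returns
#   indices             = tensor([0, 1, 2, 4, 5])               # flat positions of the non-padding tokens
#   cu_seqlens          = tensor([0, 3, 5], dtype=torch.int32)  # cumulative sequence lengths, prefixed with 0
#   max_seqlen_in_batch = 3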
|
|
|
|
| def sinusoids(length: int, channels: int, max_timescale: float = 10000) -> torch.Tensor: |
| """Returns sinusoids for positional embedding""" |
| if channels % 2 != 0: |
| raise ValueError( |
| f"Number of channels has to be divisible by 2 for sinusoidal positional embeddings, got {channels} channels." |
| ) |
| log_timescale_increment = math.log(max_timescale) / (channels // 2 - 1) |
| inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2)) |
| scaled_time = torch.arange(length).view(-1, 1) * inv_timescales.view(1, -1) |
| return torch.cat([scaled_time.sin(), scaled_time.cos()], dim=1) |
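
# Illustrative usage (not part of the model): this is the table that `WhisperPreTrainedModel._init_weights`
# copies into the encoder's frozen `embed_positions` weight; e.g. for whisper-tiny (assumed sizes):
#   pos_table = sinusoids(1500, 384)   # shape (max_source_positions, d_model) = (1500, 384)
#   # pos_table[:, :192] are sines and pos_table[:, 192:] are cosines of the same scaled time steps.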
|
|
|
|
| |
| def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): |
| """ |
| Shift input ids one token to the right. |
| """ |
| shifted_input_ids = input_ids.new_zeros(input_ids.shape) |
| shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() |
| shifted_input_ids[:, 0] = decoder_start_token_id |
|
|
    if pad_token_id is None:
        raise ValueError("`pad_token_id` has to be defined.")
| |
| shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) |
|
|
| return shifted_input_ids |
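
# Worked example (illustrative only; the token ids below are assumptions, not constants defined here):
#   labels = torch.tensor([[50363, 7, 8, -100]])
#   shift_tokens_right(labels, pad_token_id=50257, decoder_start_token_id=50258)
#   # -> tensor([[50258, 50363, 7, 8]]): the start token is prepended, the labels are shifted right,
#   #    and any remaining -100 (ignore-index) positions are replaced by the pad token.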
|
|
|
|
| |
| def _compute_mask_indices( |
| shape: Tuple[int, int], |
| mask_prob: float, |
| mask_length: int, |
| attention_mask: Optional[torch.LongTensor] = None, |
| min_masks: int = 0, |
| ) -> np.ndarray: |
| """ |
| Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for |
| ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on |
| CPU as part of the preprocessing during training. |
| |
| Args: |
        shape: The shape for which to compute masks. This should be a tuple of size 2 where
| the first element is the batch size and the second element is the length of the axis to span. |
| mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of |
| independently generated mask spans of length `mask_length` is computed by |
| `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the |
| actual percentage will be smaller. |
| mask_length: size of the mask |
| min_masks: minimum number of masked spans |
| attention_mask: A (right-padded) attention mask which independently shortens the feature axis of |
| each batch dimension. |
| """ |
| batch_size, sequence_length = shape |
|
|
| if mask_length < 1: |
| raise ValueError("`mask_length` has to be bigger than 0.") |
|
|
    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
            f" and `sequence_length`: {sequence_length}"
        )
|
|
| |
| epsilon = np.random.rand(1).item() |
|
|
| def compute_num_masked_span(input_length): |
| """Given input length, compute how many spans should be masked""" |
| num_masked_span = int(mask_prob * input_length / mask_length + epsilon) |
| num_masked_span = max(num_masked_span, min_masks) |
|
|
| |
| if num_masked_span * mask_length > sequence_length: |
| num_masked_span = sequence_length // mask_length |
|
|
| |
| if input_length - (mask_length - 1) < num_masked_span: |
| num_masked_span = max(input_length - (mask_length - 1), 0) |
|
|
| return num_masked_span |
|
|
| |
| input_lengths = ( |
| attention_mask.sum(-1).detach().tolist() |
| if attention_mask is not None |
| else [sequence_length for _ in range(batch_size)] |
| ) |
|
|
| |
| spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) |
| spec_aug_mask_idxs = [] |
|
|
| max_num_masked_span = compute_num_masked_span(sequence_length) |
|
|
| if max_num_masked_span == 0: |
| return spec_aug_mask |
|
|
| for input_length in input_lengths: |
| |
| num_masked_span = compute_num_masked_span(input_length) |
|
|
| |
| spec_aug_mask_idx = np.random.choice( |
| np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False |
| ) |
|
|
| |
| |
| |
| if len(spec_aug_mask_idx) == 0: |
| |
| |
| |
| dummy_mask_idx = sequence_length - 1 |
| else: |
| dummy_mask_idx = spec_aug_mask_idx[0] |
|
|
| spec_aug_mask_idx = np.concatenate( |
| [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] |
| ) |
| spec_aug_mask_idxs.append(spec_aug_mask_idx) |
|
|
| spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) |
|
|
| |
| spec_aug_mask_idxs = np.broadcast_to( |
| spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) |
| ) |
| spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) |
|
|
| |
| offsets = np.arange(mask_length)[None, None, :] |
| offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( |
| batch_size, max_num_masked_span * mask_length |
| ) |
| spec_aug_mask_idxs = spec_aug_mask_idxs + offsets |
|
|
| |
| if spec_aug_mask_idxs.max() > sequence_length - 1: |
| spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 |
|
|
| |
| np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) |
|
|
| return spec_aug_mask |
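
# Illustrative call (not executed at import): masking roughly 5% of 3000 encoder frames in spans of 10,
#   mask = _compute_mask_indices((2, 3000), mask_prob=0.05, mask_length=10)
# gives a boolean array of shape (2, 3000) with about 0.05 * 3000 = 150 True positions per row,
# grouped into contiguous spans of length 10 (fewer positions if spans overlap).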
|
|
|
|
| class WhisperPositionalEmbedding(nn.Embedding): |
| def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): |
| super().__init__(num_positions, embedding_dim) |
|
|
| def forward(self, input_ids, past_key_values_length=0, position_ids=None): |
| if position_ids is None: |
| return self.weight[past_key_values_length : past_key_values_length + input_ids.shape[1]] |
| else: |
| return self.weight[position_ids] |
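
# Note: when `position_ids` is not given, the forward above slices the learned table starting at
# `past_key_values_length`, so during cached decoding a single new token receives the embedding at index
# `past_key_values_length`; explicit `position_ids` take precedence when provided.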
|
|
|
|
| class WhisperAttention(nn.Module): |
| """Multi-headed attention from 'Attention Is All You Need' paper""" |
|
|
| def __init__( |
| self, |
| embed_dim: int, |
| num_heads: int, |
| dropout: float = 0.0, |
| is_decoder: bool = False, |
| bias: bool = True, |
| is_causal: bool = False, |
| config: Optional[WhisperConfig] = None, |
| ): |
| super().__init__() |
| self.embed_dim = embed_dim |
| self.num_heads = num_heads |
| self.dropout = dropout |
| self.head_dim = embed_dim // num_heads |
| self.config = config |
|
|
| if (self.head_dim * num_heads) != self.embed_dim: |
| raise ValueError( |
| f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" |
| f" and `num_heads`: {num_heads})." |
| ) |
| self.scaling = self.head_dim**-0.5 |
| self.is_decoder = is_decoder |
| self.is_causal = is_causal |
|
|
| self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False) |
| self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) |
| self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) |
| self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) |
|
|
| |
| def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): |
| return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() |
|
|
| |
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| key_value_states: Optional[torch.Tensor] = None, |
| past_key_value: Optional[Tuple[torch.Tensor]] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| layer_head_mask: Optional[torch.Tensor] = None, |
| output_attentions: bool = False, |
| ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: |
| """Input shape: Batch x Time x Channel""" |
|
|
| |
| |
| is_cross_attention = key_value_states is not None |
|
|
| bsz, tgt_len, _ = hidden_states.size() |
|
|
| |
| query_states = self.q_proj(hidden_states) * self.scaling |
| |
| |
| |
| |
| if ( |
| is_cross_attention |
| and past_key_value is not None |
| and past_key_value[0].shape[2] == key_value_states.shape[1] |
| ): |
| |
| key_states = past_key_value[0] |
| value_states = past_key_value[1] |
| elif is_cross_attention: |
| |
| key_states = self._shape(self.k_proj(key_value_states), -1, bsz) |
| value_states = self._shape(self.v_proj(key_value_states), -1, bsz) |
| elif past_key_value is not None: |
| |
| key_states = self._shape(self.k_proj(hidden_states), -1, bsz) |
| value_states = self._shape(self.v_proj(hidden_states), -1, bsz) |
| key_states = torch.cat([past_key_value[0], key_states], dim=2) |
| value_states = torch.cat([past_key_value[1], value_states], dim=2) |
| else: |
| |
| key_states = self._shape(self.k_proj(hidden_states), -1, bsz) |
| value_states = self._shape(self.v_proj(hidden_states), -1, bsz) |
|
|
| if self.is_decoder: |
| |
| |
| |
| |
| |
| |
| |
| past_key_value = (key_states, value_states) |
|
|
| proj_shape = (bsz * self.num_heads, -1, self.head_dim) |
| query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) |
| key_states = key_states.reshape(*proj_shape) |
| value_states = value_states.reshape(*proj_shape) |
|
|
| src_len = key_states.size(1) |
| attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) |
|
|
| if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): |
| raise ValueError( |
| f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" |
| f" {attn_weights.size()}" |
| ) |
|
|
| if attention_mask is not None: |
| |
| |
| |
| |
| attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask |
| attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) |
|
|
| attn_weights = nn.functional.softmax(attn_weights, dim=-1) |
|
|
| if layer_head_mask is not None: |
| if layer_head_mask.size() != (self.num_heads,): |
| raise ValueError( |
| f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" |
| f" {layer_head_mask.size()}" |
| ) |
| attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) |
| attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) |
|
|
| if output_attentions: |
| |
| |
| |
| |
| attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) |
| attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) |
| else: |
| attn_weights_reshaped = None |
|
|
| attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) |
|
|
| attn_output = torch.bmm(attn_probs, value_states) |
|
|
| if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): |
| raise ValueError( |
| f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" |
| f" {attn_output.size()}" |
| ) |
|
|
| attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) |
| attn_output = attn_output.transpose(1, 2) |
|
|
| |
| |
| attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) |
|
|
| attn_output = self.out_proj(attn_output) |
|
|
| return attn_output, attn_weights_reshaped, past_key_value |
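
# Shape walk-through for the eager attention above (illustrative): with batch size B, target length T,
# source length S, H = num_heads and D = head_dim,
#   q / k / v projections:     (B, T or S, embed_dim) -> (B * H, T or S, D)
#   attn_weights = q @ k^T:    (B * H, T, S), softmax over the last dimension
#   attn_output  = attn @ v:   (B * H, T, D) -> reshaped to (B, T, embed_dim) -> out_proj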
|
|
|
|
| |
| class WhisperFlashAttention2(WhisperAttention): |
| """ |
    Whisper flash attention module. This module inherits from `WhisperAttention`, as the weights of the module stay
    untouched. The only required change is in the forward pass, which needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
| """ |
|
|
| |
| def __init__(self, *args, **kwargs): |
| super().__init__(*args, **kwargs) |
|
|
| |
| |
| |
| self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() |
|
|
| def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): |
| return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| key_value_states: Optional[torch.Tensor] = None, |
| past_key_value: Optional[Tuple[torch.Tensor]] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| layer_head_mask: Optional[torch.Tensor] = None, |
| output_attentions: bool = False, |
| ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: |
| |
| if output_attentions: |
| raise ValueError("WhisperFlashAttention2 attention does not support output_attentions") |
|
|
| |
| |
| is_cross_attention = key_value_states is not None |
|
|
| bsz, q_len, _ = hidden_states.size() |
|
|
| |
| query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) |
| |
| |
| |
| |
| if ( |
| is_cross_attention |
| and past_key_value is not None |
| and past_key_value[0].shape[2] == key_value_states.shape[1] |
| ): |
| |
| key_states = past_key_value[0].transpose(1, 2) |
| value_states = past_key_value[1].transpose(1, 2) |
| elif is_cross_attention: |
| |
| key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) |
| value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) |
| elif past_key_value is not None: |
| |
| key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) |
| value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) |
| key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) |
| value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) |
| else: |
| |
| key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) |
| value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) |
|
|
| if self.is_decoder: |
| |
| |
| |
| |
| |
| |
| |
| past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) |
|
|
| kv_seq_len = key_states.shape[-2] |
| if past_key_value is not None: |
| kv_seq_len += past_key_value[0].shape[-2] |
|
|
| |
| |
| |
| |
| |
|
|
| input_dtype = query_states.dtype |
| if input_dtype == torch.float32: |
| if torch.is_autocast_enabled(): |
| target_dtype = torch.get_autocast_gpu_dtype() |
| |
| elif hasattr(self.config, "_pre_quantization_dtype"): |
| target_dtype = self.config._pre_quantization_dtype |
| else: |
| target_dtype = self.q_proj.weight.dtype |
|
|
            logger.warning_once(
                f"The input hidden states seem to have been silently cast to float32; this might be because you"
                f" have upcast embedding or layer norm layers to float32. We will cast the input back to"
                f" {target_dtype}."
            )
|
|
| query_states = query_states.to(target_dtype) |
| key_states = key_states.to(target_dtype) |
| value_states = value_states.to(target_dtype) |
|
|
| attn_output = self._flash_attention_forward( |
| query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout |
| ) |
|
|
| attn_output = attn_output.reshape(bsz, q_len, -1) |
| attn_output = self.out_proj(attn_output) |
|
|
| if not output_attentions: |
| attn_weights = None |
|
|
| return attn_output, attn_weights, past_key_value |
|
|
| |
| def _flash_attention_forward( |
| self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None |
| ): |
| """ |
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
        first unpads the input, then computes the attention scores and pads the final attention scores.
| |
| Args: |
| query_states (`torch.Tensor`): |
| Input query states to be passed to Flash Attention API |
| key_states (`torch.Tensor`): |
| Input key states to be passed to Flash Attention API |
| value_states (`torch.Tensor`): |
| Input value states to be passed to Flash Attention API |
| attention_mask (`torch.Tensor`): |
| The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the |
| position of padding tokens and 1 for the position of non-padding tokens. |
| dropout (`float`): |
| Attention dropout |
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
| """ |
| if not self._flash_attn_uses_top_left_mask: |
| causal = self.is_causal |
| else: |
| |
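            # flash-attn < 2.1 aligns the causal mask to the top-left corner, which is wrong when the query and
            # key lengths differ (e.g. decoding with a KV cache), so `causal` is disabled for single-token queries.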
| causal = self.is_causal and query_length != 1 |
|
|
| |
| if attention_mask is not None: |
| batch_size = query_states.shape[0] |
| query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( |
| query_states, key_states, value_states, attention_mask, query_length |
| ) |
|
|
| cu_seqlens_q, cu_seqlens_k = cu_seq_lens |
| max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens |
|
|
| attn_output_unpad = flash_attn_varlen_func( |
| query_states, |
| key_states, |
| value_states, |
| cu_seqlens_q=cu_seqlens_q, |
| cu_seqlens_k=cu_seqlens_k, |
| max_seqlen_q=max_seqlen_in_batch_q, |
| max_seqlen_k=max_seqlen_in_batch_k, |
| dropout_p=dropout, |
| softmax_scale=softmax_scale, |
| causal=causal, |
| ) |
|
|
| attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) |
| else: |
| attn_output = flash_attn_func( |
| query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal |
| ) |
|
|
| return attn_output |
|
|
| |
| def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): |
| indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) |
| batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape |
|
|
| key_layer = index_first_axis( |
| key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k |
| ) |
| value_layer = index_first_axis( |
| value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k |
| ) |
| if query_length == kv_seq_len: |
| query_layer = index_first_axis( |
| query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k |
| ) |
| cu_seqlens_q = cu_seqlens_k |
| max_seqlen_in_batch_q = max_seqlen_in_batch_k |
| indices_q = indices_k |
| elif query_length == 1: |
| max_seqlen_in_batch_q = 1 |
| cu_seqlens_q = torch.arange( |
| batch_size + 1, dtype=torch.int32, device=query_layer.device |
| ) |
| indices_q = cu_seqlens_q[:-1] |
| query_layer = query_layer.squeeze(1) |
| else: |
| |
| attention_mask = attention_mask[:, -query_length:] |
| query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) |
|
|
| return ( |
| query_layer, |
| key_layer, |
| value_layer, |
| indices_q, |
| (cu_seqlens_q, cu_seqlens_k), |
| (max_seqlen_in_batch_q, max_seqlen_in_batch_k), |
| ) |
|
|
|
|
| class WhisperSdpaAttention(WhisperAttention): |
| |
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| key_value_states: Optional[torch.Tensor] = None, |
| past_key_value: Optional[Tuple[torch.Tensor]] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| layer_head_mask: Optional[torch.Tensor] = None, |
| output_attentions: bool = False, |
| ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: |
| """Input shape: Batch x Time x Channel""" |
| if output_attentions or layer_head_mask is not None: |
| |
| logger.warning_once( |
| "WhisperModel is using WhisperSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention" |
| ' implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' |
| ) |
| return super().forward( |
| hidden_states, |
| key_value_states=key_value_states, |
| past_key_value=past_key_value, |
| attention_mask=attention_mask, |
| layer_head_mask=layer_head_mask, |
| output_attentions=output_attentions, |
| ) |
|
|
| |
| |
| is_cross_attention = key_value_states is not None |
|
|
| bsz, tgt_len, _ = hidden_states.size() |
|
|
| |
| query_states = self.q_proj(hidden_states) |
| |
| |
| |
| |
| if ( |
| is_cross_attention |
| and past_key_value is not None |
| and past_key_value[0].shape[2] == key_value_states.shape[1] |
| ): |
| |
| key_states = past_key_value[0] |
| value_states = past_key_value[1] |
| elif is_cross_attention: |
| |
| key_states = self._shape(self.k_proj(key_value_states), -1, bsz) |
| value_states = self._shape(self.v_proj(key_value_states), -1, bsz) |
| elif past_key_value is not None: |
| |
| key_states = self._shape(self.k_proj(hidden_states), -1, bsz) |
| value_states = self._shape(self.v_proj(hidden_states), -1, bsz) |
| key_states = torch.cat([past_key_value[0], key_states], dim=2) |
| value_states = torch.cat([past_key_value[1], value_states], dim=2) |
| else: |
| |
| key_states = self._shape(self.k_proj(hidden_states), -1, bsz) |
| value_states = self._shape(self.v_proj(hidden_states), -1, bsz) |
|
|
| if self.is_decoder: |
| |
| |
| |
| |
| |
| |
| |
| past_key_value = (key_states, value_states) |
|
|
| query_states = self._shape(query_states, tgt_len, bsz) |
|
|
| |
| |
| attn_output = torch.nn.functional.scaled_dot_product_attention( |
| query_states, |
| key_states, |
| value_states, |
| attn_mask=attention_mask, |
| dropout_p=self.dropout if self.training else 0.0, |
| |
| is_causal=self.is_causal and attention_mask is None and tgt_len > 1, |
| ) |
|
|
| if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim): |
| raise ValueError( |
| f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" |
| f" {attn_output.size()}" |
| ) |
|
|
| attn_output = attn_output.transpose(1, 2) |
|
|
| |
| |
| attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) |
|
|
| attn_output = self.out_proj(attn_output) |
|
|
| return attn_output, None, past_key_value |
|
|
|
|
| WHISPER_ATTENTION_CLASSES = { |
| "eager": WhisperAttention, |
| "flash_attention_2": WhisperFlashAttention2, |
| "sdpa": WhisperSdpaAttention, |
| } |
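
# The attention backend is selected through `config._attn_implementation`, which `from_pretrained` sets from
# its `attn_implementation` argument. Minimal sketch (the checkpoint name is an example, and
# "flash_attention_2" additionally requires the flash-attn package and a supported GPU):
#   model = WhisperModel.from_pretrained("openai/whisper-tiny", attn_implementation="sdpa")
#   # alternatives: attn_implementation="eager" or attn_implementation="flash_attention_2"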
|
|
|
|
| |
| class WhisperEncoderLayer(nn.Module): |
| def __init__(self, config: WhisperConfig): |
| super().__init__() |
| self.embed_dim = config.d_model |
|
|
| self.self_attn = WHISPER_ATTENTION_CLASSES[config._attn_implementation]( |
| embed_dim=self.embed_dim, |
| num_heads=config.encoder_attention_heads, |
| dropout=config.attention_dropout, |
| config=config, |
| ) |
| self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) |
| self.dropout = config.dropout |
| self.activation_fn = ACT2FN[config.activation_function] |
| self.activation_dropout = config.activation_dropout |
| self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) |
| self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) |
| self.final_layer_norm = nn.LayerNorm(self.embed_dim) |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| attention_mask: torch.Tensor, |
| layer_head_mask: torch.Tensor, |
| output_attentions: bool = False, |
| ) -> torch.Tensor: |
| """ |
| Args: |
| hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` |
| attention_mask (`torch.FloatTensor`): attention mask of size |
| `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. |
| layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size |
| `(encoder_attention_heads,)`. |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| returned tensors for more detail. |
| """ |
| residual = hidden_states |
| hidden_states = self.self_attn_layer_norm(hidden_states) |
| hidden_states, attn_weights, _ = self.self_attn( |
| hidden_states=hidden_states, |
| attention_mask=attention_mask, |
| layer_head_mask=layer_head_mask, |
| output_attentions=output_attentions, |
| ) |
| hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) |
| hidden_states = residual + hidden_states |
|
|
| residual = hidden_states |
| hidden_states = self.final_layer_norm(hidden_states) |
| hidden_states = self.activation_fn(self.fc1(hidden_states)) |
| hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) |
| hidden_states = self.fc2(hidden_states) |
| hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) |
| hidden_states = residual + hidden_states |
|
|
| if hidden_states.dtype == torch.float16 and ( |
| torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() |
| ): |
| clamp_value = torch.finfo(hidden_states.dtype).max - 1000 |
| hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) |
|
|
| outputs = (hidden_states,) |
|
|
| if output_attentions: |
| outputs += (attn_weights,) |
|
|
| return outputs |
|
|
|
|
| |
| class WhisperDecoderLayer(nn.Module): |
| def __init__(self, config: WhisperConfig): |
| super().__init__() |
| self.embed_dim = config.d_model |
|
|
| self.self_attn = WHISPER_ATTENTION_CLASSES[config._attn_implementation]( |
| embed_dim=self.embed_dim, |
| num_heads=config.decoder_attention_heads, |
| dropout=config.attention_dropout, |
| is_decoder=True, |
| is_causal=True, |
| config=config, |
| ) |
| self.dropout = config.dropout |
| self.activation_fn = ACT2FN[config.activation_function] |
| self.activation_dropout = config.activation_dropout |
|
|
| self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) |
| self.encoder_attn = WHISPER_ATTENTION_CLASSES[config._attn_implementation]( |
| self.embed_dim, |
| config.decoder_attention_heads, |
| dropout=config.attention_dropout, |
| is_decoder=True, |
| config=config, |
| ) |
| self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) |
| self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) |
| self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) |
| self.final_layer_norm = nn.LayerNorm(self.embed_dim) |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| attention_mask: Optional[torch.Tensor] = None, |
| encoder_hidden_states: Optional[torch.Tensor] = None, |
| encoder_attention_mask: Optional[torch.Tensor] = None, |
| layer_head_mask: Optional[torch.Tensor] = None, |
| cross_attn_layer_head_mask: Optional[torch.Tensor] = None, |
| past_key_value: Optional[Tuple[torch.Tensor]] = None, |
| output_attentions: Optional[bool] = False, |
| use_cache: Optional[bool] = True, |
| ) -> torch.Tensor: |
| """ |
| Args: |
| hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` |
| attention_mask (`torch.FloatTensor`): attention mask of size |
| `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. |
| encoder_hidden_states (`torch.FloatTensor`): |
| cross attention input to the layer of shape `(batch, seq_len, embed_dim)` |
| encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size |
| `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. |
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(decoder_attention_heads,)`.
| cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of |
| size `(decoder_attention_heads,)`. |
| past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| returned tensors for more detail. |
| """ |
| residual = hidden_states |
| hidden_states = self.self_attn_layer_norm(hidden_states) |
|
|
| |
| |
| self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None |
| |
| hidden_states, self_attn_weights, present_key_value = self.self_attn( |
| hidden_states=hidden_states, |
| past_key_value=self_attn_past_key_value, |
| attention_mask=attention_mask, |
| layer_head_mask=layer_head_mask, |
| output_attentions=output_attentions, |
| ) |
| hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) |
| hidden_states = residual + hidden_states |
|
|
| |
| cross_attn_present_key_value = None |
| cross_attn_weights = None |
| if encoder_hidden_states is not None: |
| residual = hidden_states |
| hidden_states = self.encoder_attn_layer_norm(hidden_states) |
|
|
| |
| cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None |
| hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( |
| hidden_states=hidden_states, |
| key_value_states=encoder_hidden_states, |
| attention_mask=encoder_attention_mask, |
| layer_head_mask=cross_attn_layer_head_mask, |
| past_key_value=cross_attn_past_key_value, |
| output_attentions=output_attentions, |
| ) |
| hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) |
| hidden_states = residual + hidden_states |
|
|
| |
| present_key_value = present_key_value + cross_attn_present_key_value |
|
|
| |
| residual = hidden_states |
| hidden_states = self.final_layer_norm(hidden_states) |
| hidden_states = self.activation_fn(self.fc1(hidden_states)) |
| hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) |
| hidden_states = self.fc2(hidden_states) |
| hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) |
| hidden_states = residual + hidden_states |
|
|
| outputs = (hidden_states,) |
|
|
| if output_attentions: |
| outputs += (self_attn_weights, cross_attn_weights) |
|
|
| if use_cache: |
| outputs += (present_key_value,) |
|
|
| return outputs |
|
|
|
|
| class WhisperPreTrainedModel(PreTrainedModel): |
| config_class = WhisperConfig |
| base_model_prefix = "model" |
| main_input_name = "input_features" |
| supports_gradient_checkpointing = True |
| _no_split_modules = ["WhisperEncoderLayer", "WhisperDecoderLayer"] |
| _supports_flash_attn_2 = True |
| _supports_sdpa = True |
|
|
| def _init_weights(self, module): |
| std = self.config.init_std |
| if isinstance(module, (nn.Linear, nn.Conv1d)): |
| module.weight.data.normal_(mean=0.0, std=std) |
| if module.bias is not None: |
| module.bias.data.zero_() |
| elif isinstance(module, nn.Embedding): |
| module.weight.data.normal_(mean=0.0, std=std) |
| if module.padding_idx is not None: |
| module.weight.data[module.padding_idx].zero_() |
| elif isinstance(module, WhisperEncoder): |
| with torch.no_grad(): |
| embed_positions = module.embed_positions.weight |
| embed_positions.copy_(sinusoids(*embed_positions.shape)) |
|
|
| def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): |
| """ |
| Computes the output length of the convolutional layers |
| """ |
| input_lengths = (input_lengths - 1) // 2 + 1 |
|
|
| return input_lengths |
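
    # Worked example (illustrative only): for the standard 30 s input of 3000 mel frames,
    # (3000 - 1) // 2 + 1 = 1500 frames remain after the stride-2 convolution, matching
    # `config.max_source_positions` of the released Whisper checkpoints.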
|
|
|
|
| WHISPER_START_DOCSTRING = r""" |
| This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the |
| library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads |
| etc.) |
| |
| This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. |
| Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage |
| and behavior. |
| |
| Parameters: |
| config ([`WhisperConfig`]): |
| Model configuration class with all the parameters of the model. Initializing with a config file does not |
| load the weights associated with the model, only the configuration. Check out the |
| [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
| """ |
|
|
| WHISPER_INPUTS_DOCSTRING = r""" |
| Args: |
| input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`): |
            Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by
| loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via |
| the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the |
| [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a |
| tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`] |
| attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Mask to avoid performing *SpecAugment* data augmentation on padding token indices. Mask values selected in |
| `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| [What are attention masks?](../glossary#attention-mask) |
| decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): |
| Indices of decoder input sequence tokens in the vocabulary. |
| |
| Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are decoder input IDs?](../glossary#decoder-input-ids) |
| |
| Whisper uses the `decoder_start_token_id` as the starting token for `decoder_input_ids` generation. If |
| `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see |
| `past_key_values`). |
| decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): |
| Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also |
| be used by default. |
| |
| If you want to change padding behavior, you should read |
| [`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the BART |
| paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. |
| head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): |
| Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| |
| decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): |
| Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| |
| cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): |
| Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| |
        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
| Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) |
| `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of |
| hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. |
| past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): |
| Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape |
| `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape |
| `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. |
| |
| Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention |
| blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. |
| |
| If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that |
| don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all |
| `decoder_input_ids` of shape `(batch_size, sequence_length)`. |
| decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): |
| Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded |
| representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be |
| input (see `past_key_values`). This is useful if you want more control over how to convert |
| `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. |
| use_cache (`bool`, *optional*): |
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see |
| `past_key_values`). |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| """ |
|
|
| WHISPER_ENCODER_INPUTS_DOCSTRING = r""" |
| Args: |
| input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`): |
            Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by
| loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via |
| the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the |
| [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a |
| tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`] |
| head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): |
| Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
| Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) |
| `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of |
| hidden-states at the output of the last layer of the encoder. |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| """ |
|
|
|
|
| class WhisperEncoder(WhisperPreTrainedModel): |
| """ |
| Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a |
| [`WhisperEncoderLayer`]. |
| |
| Args: |
| config: WhisperConfig |
| """ |
|
|
| def __init__(self, config: WhisperConfig, avg_pool: bool = True): |
| super().__init__(config) |
| self.dropout = config.dropout |
| self.layerdrop = config.encoder_layerdrop |
|
|
| embed_dim = config.d_model |
| self.num_mel_bins = config.num_mel_bins |
| self.padding_idx = config.pad_token_id |
| self.max_source_positions = config.max_source_positions |
| self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 |
|
|
| self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1) |
| self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1) |
|
|
| self.embed_positions = nn.Embedding(self.max_source_positions, embed_dim) |
| self.embed_positions.requires_grad_(False) |
|
|
| self.layers = nn.ModuleList([WhisperEncoderLayer(config) for _ in range(config.encoder_layers)]) |
| self.layer_norm = nn.LayerNorm(config.d_model) |
|
|
| self.gradient_checkpointing = False |
| self.config = config |
| |
| self.post_init() |
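        # Optional time-axis average pooling (kernel 2, stride 2); when enabled it halves the encoder output length.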
| if avg_pool: |
| self.avg_pooler = nn.AvgPool1d(2, stride=2) |
| else: |
| self.avg_pooler = None |
|
|
| def _freeze_parameters(self): |
| for param in self.parameters(): |
| param.requires_grad = False |
| self._requires_grad = False |
|
|
| def get_input_embeddings(self) -> nn.Module: |
| return self.conv1 |
|
|
| def set_input_embeddings(self, value: nn.Module): |
| self.conv1 = value |
|
|
| def forward( |
| self, |
| input_features, |
| attention_mask=None, |
| audio_len_after_cnn=None, |
| head_mask=None, |
| output_attentions=None, |
| output_hidden_states=None, |
| return_dict=None, |
| ): |
| r""" |
| Args: |
| input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`): |
| Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be |
| obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a |
| `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into |
| `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding |
| and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`] |
            attention_mask (`torch.Tensor`, *optional*):
                Whisper does not support masking of the `input_features`; this argument is preserved for compatibility
                but is not used. By default, the silence in the input log mel spectrogram is ignored.
| head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): |
| Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| returned tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors |
| for more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| """ |
| expected_seq_length = self.config.max_source_positions * self.conv1.stride[0] * self.conv2.stride[0] |
| if input_features.shape[-1] != expected_seq_length: |
| raise ValueError( |
| f"Whisper expects the mel input features to be of length {expected_seq_length}, but found {input_features.shape[-1]}. Make sure to pad the input mel features to {expected_seq_length}." |
| ) |
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
        if audio_len_after_cnn is not None:
            # The conv stack halves the time dimension (conv2 has stride 2), so each post-CNN frame corresponds to
            # two mel frames. Trim the (padded) mel input to the longest useful length in the batch before the convs.
            input_mel_len = audio_len_after_cnn * 2
            max_mel_len_in_batch = int(input_mel_len.max())
            input_features = input_features[:, :, :max_mel_len_in_batch]
|
|
| inputs_embeds = nn.functional.gelu(self.conv1(input_features)) |
| inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds)) |
|
|
| inputs_embeds = inputs_embeds.permute(0, 2, 1) |
| embed_pos = self.embed_positions.weight |
|
|
| bsz = inputs_embeds.size(0) |
| src_len = inputs_embeds.size(1) |
|
|
| hidden_states = inputs_embeds + embed_pos[:src_len] |
| hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) |
|
|
        if attention_mask is not None:
            # Padding handling for the encoder: positions where the mask is set (non-zero) are zeroed in the
            # hidden states and excluded from attention through an additive `-inf` mask that is broadcast over
            # all encoder attention heads.
            attention_mask = attention_mask.to(dtype=self.conv1.weight.dtype, device=self.conv1.weight.device)
            batch_src_len = attention_mask.size(1)
            hidden_states = hidden_states[:, :batch_src_len, :]
            attention_mask = attention_mask.view(bsz, -1, batch_src_len)

            attention_mask_ = attention_mask.all(1)
            hidden_states[attention_mask_] = 0
            key_attention_mask = (
                attention_mask_.view(bsz, 1, 1, batch_src_len)
                .expand(-1, self.config.encoder_attention_heads, -1, -1)
                .reshape(bsz, self.config.encoder_attention_heads, 1, batch_src_len)
            )
            new_attention_mask = torch.zeros_like(
                key_attention_mask, dtype=hidden_states.dtype, device=hidden_states.device
            )
            attention_mask = new_attention_mask.masked_fill(key_attention_mask, float("-inf"))
|
|
| encoder_states = () if output_hidden_states else None |
| all_attentions = () if output_attentions else None |
|
|
| |
| if head_mask is not None: |
| assert head_mask.size()[0] == ( |
| len(self.layers) |
| ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." |
|
|
| for idx, encoder_layer in enumerate(self.layers): |
| if output_hidden_states: |
| encoder_states = encoder_states + (hidden_states,) |
| |
| to_drop = False |
| if self.training: |
| dropout_probability = torch.rand([]) |
| if dropout_probability < self.layerdrop: |
| to_drop = True |
|
|
| if to_drop: |
| layer_outputs = (None, None) |
| else: |
| if self.gradient_checkpointing and self.training: |
| |
| |
| |
| |
| |
| |
| |
| layer_outputs = torch.utils.checkpoint.checkpoint( |
| encoder_layer, |
| hidden_states, |
| attention_mask, |
| (head_mask[idx] if head_mask is not None else None), |
| output_attentions, |
| ) |
| else: |
| layer_outputs = encoder_layer( |
| hidden_states, |
| attention_mask=attention_mask, |
| layer_head_mask=(head_mask[idx] if head_mask is not None else None), |
| output_attentions=output_attentions, |
| ) |
|
|
| hidden_states = layer_outputs[0] |
|
|
| if output_attentions: |
| all_attentions = all_attentions + (layer_outputs[1],) |
|
|
| hidden_states = self.layer_norm(hidden_states) |
| if self.avg_pooler: |
| hidden_states = hidden_states.permute(0, 2, 1) |
| hidden_states = self.avg_pooler(hidden_states) |
| hidden_states = hidden_states.permute(0, 2, 1) |
| |
| if output_hidden_states: |
| encoder_states = encoder_states + (hidden_states,) |
|
|
| if not return_dict: |
| return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) |
| return BaseModelOutput( |
| last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions |
| ) |
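
# Note on the encoder output length (illustrative): the standard 3000-frame mel input produces 1500 hidden
# states after the conv stack; when the encoder is built with `avg_pool=True` (the default above), the final
# AvgPool1d halves this to 750 frames in `last_hidden_state`.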
|
|
|
|
| class WhisperDecoder(WhisperPreTrainedModel): |
| """ |
| Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`WhisperDecoderLayer`] |
| |
| Args: |
| config: WhisperConfig |
| """ |
|
|
| main_input_name = "input_ids" |
|
|
| def __init__(self, config: WhisperConfig): |
| super().__init__(config) |
| self.dropout = config.dropout |
| self.layerdrop = config.decoder_layerdrop |
| self.padding_idx = config.pad_token_id |
| self.max_target_positions = config.max_target_positions |
| self.max_source_positions = config.max_source_positions |
| self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 |
|
|
| self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) |
| self.embed_positions = WhisperPositionalEmbedding(self.max_target_positions, config.d_model) |
|
|
| self.layers = nn.ModuleList([WhisperDecoderLayer(config) for _ in range(config.decoder_layers)]) |
| self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" |
| self._use_sdpa = config._attn_implementation == "sdpa" |
|
|
| self.layer_norm = nn.LayerNorm(config.d_model) |
|
|
| self.gradient_checkpointing = False |
| |
| self.post_init() |
|
|
| def get_input_embeddings(self): |
| return self.embed_tokens |
|
|
| def set_input_embeddings(self, value): |
| self.embed_tokens = value |
|
|
| def forward( |
| self, |
| input_ids=None, |
| attention_mask=None, |
| encoder_hidden_states=None, |
| head_mask=None, |
| cross_attn_head_mask=None, |
| past_key_values=None, |
| inputs_embeds=None, |
| position_ids=None, |
| use_cache=None, |
| output_attentions=None, |
| output_hidden_states=None, |
| return_dict=None, |
| ): |
| r""" |
| Args: |
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you |
| provide it. |
| |
| Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| [What are attention masks?](../glossary#attention-mask) |
| encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): |
| Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention |
| of the decoder. |
| head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): |
| Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| |
| cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): |
| Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention |
| on hidden heads. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| |
| past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): |
| Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of |
| shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of |
| shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. |
| |
| Contains pre-computed hidden-states (key and values in the self-attention blocks and in the |
| cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. |
| |
| If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those |
| that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of |
| all `decoder_input_ids` of shape `(batch_size, sequence_length)`. |
| inputs_embeds (`torch.FloatTensor` of |
| shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing |
| `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more |
| control over how to convert `input_ids` indices into associated vectors than the model's internal |
| embedding lookup matrix. |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| returned tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors |
| for more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| """ |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| |
| if input_ids is not None and inputs_embeds is not None: |
| raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") |
| elif input_ids is not None: |
| input_shape = input_ids.size() |
| input_ids = input_ids.view(-1, input_shape[-1]) |
| elif inputs_embeds is not None: |
| input_shape = inputs_embeds.size()[:-1] |
| else: |
| raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") |
|
|
| |
| past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.embed_tokens(input_ids) |
|
|
| if self._use_flash_attention_2: |
| |
| attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None |
| elif self._use_sdpa and head_mask is None and not output_attentions: |
| |
| attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( |
| attention_mask, input_shape, inputs_embeds, past_key_values_length |
| ) |
| else: |
| |
| attention_mask = _prepare_4d_causal_attention_mask( |
| attention_mask, input_shape, inputs_embeds, past_key_values_length |
| ) |
|
|
| |
| if input_ids is not None: |
| positions = self.embed_positions( |
| input_ids, past_key_values_length=past_key_values_length, position_ids=position_ids |
| ) |
| else: |
| positions = self.embed_positions( |
| inputs_embeds, past_key_values_length=past_key_values_length, position_ids=position_ids |
| ) |
|
|
| hidden_states = inputs_embeds + positions |
| hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) |
|
|
| if self.gradient_checkpointing and self.training: |
| if use_cache: |
| logger.warning_once( |
| "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..." |
| ) |
| use_cache = False |
| |
| all_hidden_states = () if output_hidden_states else None |
| all_self_attns = () if output_attentions else None |
| all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None |
| next_decoder_cache = () if use_cache else None |
|
|
| |
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                assert attn_mask.size()[0] == (len(self.layers)), (
                    f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                    f" {attn_mask.size()[0]}."
                )
| for idx, decoder_layer in enumerate(self.layers): |
| |
| if output_hidden_states: |
| all_hidden_states += (hidden_states,) |
| if self.training: |
| dropout_probability = torch.rand([]) |
| if dropout_probability < self.layerdrop: |
| continue |
|
|
| past_key_value = past_key_values[idx] if past_key_values is not None else None |
|
|
| if self.gradient_checkpointing and self.training: |
| layer_outputs = self._gradient_checkpointing_func( |
| decoder_layer.__call__, |
| hidden_states, |
| attention_mask, |
| encoder_hidden_states, |
| None, |
| head_mask[idx] if head_mask is not None else None, |
| cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, |
| None, |
| output_attentions, |
| use_cache, |
| ) |
| else: |
| layer_outputs = decoder_layer( |
| hidden_states, |
| attention_mask=attention_mask, |
| encoder_hidden_states=encoder_hidden_states, |
| layer_head_mask=(head_mask[idx] if head_mask is not None else None), |
| cross_attn_layer_head_mask=( |
| cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None |
| ), |
| past_key_value=past_key_value, |
| output_attentions=output_attentions, |
| use_cache=use_cache, |
| ) |
| hidden_states = layer_outputs[0] |
|
|
| if use_cache: |
| next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) |
|
|
| if output_attentions: |
| all_self_attns += (layer_outputs[1],) |
|
|
| if encoder_hidden_states is not None: |
| all_cross_attentions += (layer_outputs[2],) |
|
|
| hidden_states = self.layer_norm(hidden_states) |
| # add hidden states from the last decoder layer |
| if output_hidden_states: |
| all_hidden_states += (hidden_states,) |
|
|
| next_cache = next_decoder_cache if use_cache else None |
| if not return_dict: |
| return tuple( |
| v |
| for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] |
| if v is not None |
| ) |
| return BaseModelOutputWithPastAndCrossAttentions( |
| last_hidden_state=hidden_states, |
| past_key_values=next_cache, |
| hidden_states=all_hidden_states, |
| attentions=all_self_attns, |
| cross_attentions=all_cross_attentions, |
| ) |
|
|
|
|
| @add_start_docstrings( |
| "The bare Whisper Model outputting raw hidden-states without any specific head on top.", |
| WHISPER_START_DOCSTRING, |
| ) |
| class WhisperModel(WhisperPreTrainedModel): |
| def __init__(self, config: WhisperConfig): |
| super().__init__(config) |
|
|
| self.encoder = WhisperEncoder(config) |
| self.decoder = WhisperDecoder(config) |
| |
| self.post_init() |
|
|
| def get_input_embeddings(self): |
| return self.decoder.embed_tokens |
|
|
| def set_input_embeddings(self, value): |
| self.decoder.embed_tokens = value |
|
|
| def get_encoder(self): |
| return self.encoder |
|
|
| def get_decoder(self): |
| return self.decoder |
|
|
| def freeze_encoder(self): |
| """ |
| Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will |
| not be updated during training. |
| """ |
| self.encoder._freeze_parameters() |
|
|
| def _mask_input_features( |
| self, |
| input_features: torch.FloatTensor, |
| attention_mask: Optional[torch.LongTensor] = None, |
| ): |
| """ |
| Masks extracted features along time axis and/or along feature axis according to |
| [SpecAugment](https://arxiv.org/abs/1904.08779). |
| """ |
|
|
| # `config.apply_spec_augment` can set masking to False |
| if not getattr(self.config, "apply_spec_augment", True): |
| return input_features |
|
|
| |
| batch_size, hidden_size, sequence_length = input_features.size() |
|
|
| if self.config.mask_time_prob > 0 and self.training: |
| # generate indices & apply SpecAugment along the time axis |
| mask_time_indices = _compute_mask_indices( |
| (batch_size, sequence_length), |
| mask_prob=self.config.mask_time_prob, |
| mask_length=self.config.mask_time_length, |
| attention_mask=attention_mask, |
| min_masks=self.config.mask_time_min_masks, |
| ) |
| mask_time_indices = torch.tensor(mask_time_indices, device=input_features.device, dtype=torch.bool) |
| mask_time_indices = mask_time_indices[:, None].expand(-1, hidden_size, -1) |
| input_features[mask_time_indices] = 0 |
|
|
| if self.config.mask_feature_prob > 0 and self.training: |
| # generate indices & apply SpecAugment along the feature axis |
| mask_feature_indices = _compute_mask_indices( |
| (batch_size, hidden_size), |
| mask_prob=self.config.mask_feature_prob, |
| mask_length=self.config.mask_feature_length, |
| min_masks=self.config.mask_feature_min_masks, |
| ) |
| mask_feature_indices = torch.tensor(mask_feature_indices, device=input_features.device, dtype=torch.bool) |
| input_features[mask_feature_indices] = 0 |
|
|
| return input_features |
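|
| # Illustrative sketch (not part of the model code): the SpecAugment masking above is driven entirely by config |
| # flags and only runs in training mode. The values below are hypothetical, not library defaults: |
| # |
| #     config = WhisperConfig( |
| #         apply_spec_augment=True, |
| #         mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, |
| #         mask_feature_prob=0.10, mask_feature_length=64, mask_feature_min_masks=0, |
| #     ) |
| #     model = WhisperModel(config).train()  # _mask_input_features is a no-op under model.eval() |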
|
|
| @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) |
| @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) |
| def forward( |
| self, |
| input_features: Optional[torch.FloatTensor] = None, |
| attention_mask: Optional[torch.LongTensor] = None, |
| decoder_input_ids: Optional[torch.LongTensor] = None, |
| decoder_attention_mask: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| decoder_head_mask: Optional[torch.Tensor] = None, |
| cross_attn_head_mask: Optional[torch.Tensor] = None, |
| encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| decoder_inputs_embeds: Optional[Tuple[torch.FloatTensor]] = None, |
| decoder_position_ids: Optional[Tuple[torch.LongTensor]] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]: |
| r""" |
| Returns: |
| |
| Example: |
| ```python |
| >>> import torch |
| >>> from transformers import AutoFeatureExtractor, WhisperModel |
| >>> from datasets import load_dataset |
| |
| >>> model = WhisperModel.from_pretrained("openai/whisper-base") |
| >>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-base") |
| >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") |
| >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt") |
| >>> input_features = inputs.input_features |
| >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id |
| >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state |
| >>> list(last_hidden_state.shape) |
| [1, 2, 512] |
| ```""" |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| if encoder_outputs is None: |
| input_features = self._mask_input_features(input_features, attention_mask=attention_mask) |
|
|
| encoder_outputs = self.encoder( |
| input_features, |
| head_mask=head_mask, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| # a tuple passed for `encoder_outputs` is wrapped in a `BaseModelOutput` when `return_dict=True` |
| elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): |
| encoder_outputs = BaseModelOutput( |
| last_hidden_state=encoder_outputs[0], |
| hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, |
| attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, |
| ) |
|
|
| # decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn) |
| decoder_outputs = self.decoder( |
| input_ids=decoder_input_ids, |
| attention_mask=decoder_attention_mask, |
| encoder_hidden_states=encoder_outputs[0], |
| head_mask=decoder_head_mask, |
| cross_attn_head_mask=cross_attn_head_mask, |
| past_key_values=past_key_values, |
| inputs_embeds=decoder_inputs_embeds, |
| position_ids=decoder_position_ids, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| if not return_dict: |
| return decoder_outputs + encoder_outputs |
|
|
| return Seq2SeqModelOutput( |
| last_hidden_state=decoder_outputs.last_hidden_state, |
| past_key_values=decoder_outputs.past_key_values, |
| decoder_hidden_states=decoder_outputs.hidden_states, |
| decoder_attentions=decoder_outputs.attentions, |
| cross_attentions=decoder_outputs.cross_attentions, |
| encoder_last_hidden_state=encoder_outputs.last_hidden_state, |
| encoder_hidden_states=encoder_outputs.hidden_states, |
| encoder_attentions=encoder_outputs.attentions, |
| ) |
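|
| # Usage note (sketch): because `forward` accepts precomputed `encoder_outputs`, the expensive encoder pass can be |
| # run once and reused across several decoder calls. Variable names below are illustrative: |
| # |
| #     encoder_outputs = model.get_encoder()(input_features) |
| #     out_a = model(encoder_outputs=encoder_outputs, decoder_input_ids=ids_a) |
| #     out_b = model(encoder_outputs=encoder_outputs, decoder_input_ids=ids_b) |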
|
|
|
|
| @add_start_docstrings( |
| "The Whisper Model with a language modeling head. Can be used for automatic speech recognition.", |
| WHISPER_START_DOCSTRING, |
| ) |
| class WhisperForConditionalGeneration(WhisperGenerationMixin, WhisperPreTrainedModel): |
| base_model_prefix = "model" |
| _tied_weights_keys = ["proj_out.weight"] |
|
|
| def __init__(self, config: WhisperConfig): |
| super().__init__(config) |
| self.model = WhisperModel(config) |
| self.proj_out = nn.Linear(config.d_model, config.vocab_size, bias=False) |
|
|
| |
| self.post_init() |
|
|
| def get_encoder(self): |
| return self.model.get_encoder() |
|
|
| def get_decoder(self): |
| return self.model.get_decoder() |
|
|
| def get_output_embeddings(self): |
| return self.proj_out |
|
|
| def set_output_embeddings(self, new_embeddings): |
| self.proj_out = new_embeddings |
|
|
| def get_input_embeddings(self) -> nn.Module: |
| return self.model.get_input_embeddings() |
|
|
| def freeze_encoder(self): |
| """ |
| Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will |
| not be updated during training. |
| """ |
| self.model.encoder._freeze_parameters() |
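|
| # Fine-tuning sketch (illustrative, hypothetical hyperparameters): freezing the encoder sets `requires_grad=False` |
| # on its parameters, so only the decoder and the output projection are updated: |
| # |
| #     model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") |
| #     model.freeze_encoder() |
| #     trainable = [p for p in model.parameters() if p.requires_grad] |
| #     optimizer = torch.optim.AdamW(trainable, lr=1e-5) |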
|
|
| @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) |
| @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) |
| def forward( |
| self, |
| input_features: Optional[torch.FloatTensor] = None, |
| attention_mask: Optional[torch.LongTensor] = None, |
| decoder_input_ids: Optional[torch.LongTensor] = None, |
| decoder_attention_mask: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| decoder_head_mask: Optional[torch.Tensor] = None, |
| cross_attn_head_mask: Optional[torch.Tensor] = None, |
| encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| decoder_inputs_embeds: Optional[Tuple[torch.FloatTensor]] = None, |
| decoder_position_ids: Optional[Tuple[torch.LongTensor]] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` |
| or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is |
| only computed for the tokens with labels in `[0, ..., config.vocab_size]`. |
| |
| Returns: |
| |
| Example: |
| |
| ```python |
| >>> import torch |
| >>> from transformers import AutoProcessor, WhisperForConditionalGeneration |
| >>> from datasets import load_dataset |
| |
| >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") |
| >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") |
| |
| >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") |
| |
| >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt") |
| >>> input_features = inputs.input_features |
| |
| >>> generated_ids = model.generate(inputs=input_features) |
| |
| >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] |
| >>> transcription |
| ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' |
| ```""" |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| if labels is not None: |
| if decoder_input_ids is None and decoder_inputs_embeds is None: |
| decoder_input_ids = shift_tokens_right( |
| labels, self.config.pad_token_id, self.config.decoder_start_token_id |
| ) |
|
|
| outputs = self.model( |
| input_features, |
| attention_mask=attention_mask, |
| decoder_input_ids=decoder_input_ids, |
| encoder_outputs=encoder_outputs, |
| decoder_attention_mask=decoder_attention_mask, |
| head_mask=head_mask, |
| decoder_head_mask=decoder_head_mask, |
| cross_attn_head_mask=cross_attn_head_mask, |
| past_key_values=past_key_values, |
| decoder_inputs_embeds=decoder_inputs_embeds, |
| decoder_position_ids=decoder_position_ids, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| lm_logits = self.proj_out(outputs[0]) |
|
|
| loss = None |
| if labels is not None: |
| loss_fct = CrossEntropyLoss() |
| # move labels to the same device as the logits (useful for model parallelism) |
| labels = labels.to(lm_logits.device) |
| loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.reshape(-1)) |
|
|
| if not return_dict: |
| output = (lm_logits,) + outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return Seq2SeqLMOutput( |
| loss=loss, |
| logits=lm_logits, |
| past_key_values=outputs.past_key_values, |
| decoder_hidden_states=outputs.decoder_hidden_states, |
| decoder_attentions=outputs.decoder_attentions, |
| cross_attentions=outputs.cross_attentions, |
| encoder_last_hidden_state=outputs.encoder_last_hidden_state, |
| encoder_hidden_states=outputs.encoder_hidden_states, |
| encoder_attentions=outputs.encoder_attentions, |
| ) |
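|
| # Training sketch (illustrative; reuses `processor`/`model` from the docstring example above, other names are |
| # hypothetical): when `labels` are passed, `decoder_input_ids` are derived internally via `shift_tokens_right`, |
| # and positions labelled -100 are ignored by the cross-entropy loss: |
| # |
| #     input_features = processor(audio_array, sampling_rate=16_000, return_tensors="pt").input_features |
| #     labels = processor.tokenizer("hello world", return_tensors="pt").input_ids |
| #     loss = model(input_features=input_features, labels=labels).loss |
| #     loss.backward() |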
|
|
| def prepare_inputs_for_generation( |
| self, |
| decoder_input_ids, |
| past_key_values=None, |
| use_cache=None, |
| encoder_outputs=None, |
| attention_mask=None, |
| decoder_attention_mask=None, |
| **kwargs, |
| ): |
| decoder_position_ids = None |
| if decoder_attention_mask is not None: |
| decoder_position_ids = (decoder_attention_mask.cumsum(-1) - 1).clamp(min=0) |
|
|
| if past_key_values is not None: |
| past_length = past_key_values[0][0].shape[2] |
|
|
| # some generation methods already pass only the last input id |
| if decoder_input_ids.shape[1] > past_length: |
| remove_prefix_length = past_length |
| else: |
| # default to the old behavior: keep only the final id |
| remove_prefix_length = decoder_input_ids.shape[1] - 1 |
|
|
| decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] |
|
|
| if decoder_position_ids is not None and decoder_position_ids.shape[1] > decoder_input_ids.shape[1]: |
| decoder_position_ids = decoder_position_ids[:, remove_prefix_length:] |
|
|
| return { |
| "encoder_outputs": encoder_outputs, |
| "past_key_values": past_key_values, |
| "decoder_input_ids": decoder_input_ids, |
| "use_cache": use_cache, |
| "decoder_attention_mask": decoder_attention_mask, |
| "decoder_position_ids": decoder_position_ids, |
| } |
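|
| # Illustrative trace (values hypothetical): with a 4-token cache and 5 decoder input ids, only the newest id is |
| # kept for the next forward pass: |
| # |
| #     past_length = 4; decoder_input_ids.shape == (1, 5) |
| #     decoder_input_ids = decoder_input_ids[:, 4:]   # shape (1, 1): only the last token is re-embedded |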
|
|
| @staticmethod |
| def _reorder_cache(past_key_values, beam_idx): |
| reordered_past = () |
| for layer_past in past_key_values: |
| reordered_past += ( |
| tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), |
| ) |
| return reordered_past |
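|
| # Note: during beam search, `beam_idx` maps each new beam to the batch row its continuation came from, so every |
| # cached key/value tensor is gathered along the batch dimension. Sketch with hypothetical values: |
| # |
| #     beam_idx = torch.tensor([2, 0, 0])      # new beams 0, 1, 2 continue old beams 2, 0, 0 |
| #     past_state.index_select(0, beam_idx)    # (num_beams, heads, seq_len, head_dim), reordered along dim 0 |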
|
|
|
|
| class WhisperDecoderWrapper(WhisperPreTrainedModel): |
| """ |
| This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is |
| used in combination with the [`EncoderDecoderModel`] framework. |
| """ |
|
|
| def __init__(self, config): |
| super().__init__(config) |
| config.is_encoder_decoder = False |
| self.decoder = WhisperDecoder(config) |
|
|
| def get_input_embeddings(self): |
| return self.decoder.embed_tokens |
|
|
| def set_input_embeddings(self, value): |
| self.decoder.embed_tokens = value |
|
|
| def forward(self, *args, **kwargs): |
| return self.decoder(*args, **kwargs) |
|
|
|
|
| @add_start_docstrings( |
| """ |
| Whisper decoder with a language modeling head on top (linear layer with weights tied to the input embeddings). |
| """, |
| WHISPER_START_DOCSTRING, |
| ) |
| class WhisperForCausalLM(WhisperPreTrainedModel): |
| _tied_weights_keys = ["proj_out.weight"] |
| main_input_name = "input_ids" |
|
|
| def __init__(self, config): |
| super().__init__(config) |
| config.is_encoder_decoder = False |
| self.model = WhisperDecoderWrapper(config) |
|
|
| self.proj_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False) |
|
|
| |
| self.post_init() |
|
|
| def get_output_embeddings(self): |
| return self.proj_out |
|
|
| def set_output_embeddings(self, new_embeddings): |
| self.proj_out = new_embeddings |
|
|
| def get_input_embeddings(self) -> nn.Module: |
| return self.model.get_input_embeddings() |
|
|
| def set_input_embeddings(self, value): |
| self.model.set_input_embeddings(value) |
|
|
| def set_decoder(self, decoder): |
| self.model.decoder = decoder |
|
|
| def get_decoder(self): |
| return self.model.decoder |
|
|
| @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) |
| def forward( |
| self, |
| input_ids: torch.LongTensor = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| cross_attn_head_mask: Optional[torch.Tensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: |
| r""" |
| Args: |
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you |
| provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) |
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| [What are attention masks?](../glossary#attention-mask) |
| encoder_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): |
| Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention |
| if the model is configured as a decoder. |
| head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): |
| Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): |
| Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): |
| Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of |
| shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of |
| shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional |
| tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains |
| pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention |
| blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If |
| `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that |
| don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all |
| `decoder_input_ids` of shape `(batch_size, sequence_length)`. |
| inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): |
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. |
| This is useful if you want more control over how to convert `input_ids` indices into associated vectors |
| than the model's internal embedding lookup matrix. |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., |
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored |
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. |
| use_cache (`bool`, *optional*): |
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding |
| (see `past_key_values`). |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| returned tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors |
| for more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| |
| Returns: |
| |
| Example: |
| |
| ```python |
| >>> from transformers import WhisperForCausalLM, WhisperForConditionalGeneration, WhisperProcessor |
| >>> import torch |
| >>> from datasets import load_dataset |
| |
| >>> processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2") |
| >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2") |
| |
| >>> assistant_model = WhisperForCausalLM.from_pretrained("distil-whisper/distil-large-v2") |
| |
| >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") |
| >>> sample = ds[0]["audio"] |
| >>> input_features = processor( |
| ... sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt" |
| ... ).input_features |
| |
| >>> predicted_ids = model.generate(input_features, assistant_model=assistant_model) |
| |
| >>> # decode token ids to text |
| >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0] |
| >>> transcription |
| ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.' |
| ```""" |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| # if a tuple or `BaseModelOutput` is passed for `encoder_outputs`, only the hidden states are used |
| if isinstance(encoder_outputs, (BaseModelOutput, tuple, list)): |
| encoder_outputs = encoder_outputs[0] |
|
|
| |
| outputs = self.model.decoder( |
| input_ids=input_ids, |
| attention_mask=attention_mask, |
| encoder_hidden_states=encoder_outputs, |
| head_mask=head_mask, |
| cross_attn_head_mask=cross_attn_head_mask, |
| past_key_values=past_key_values, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| logits = self.proj_out(outputs[0]) |
|
|
| loss = None |
| if labels is not None: |
| labels = labels.to(logits.device) |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) |
|
|
| if not return_dict: |
| output = (logits,) + outputs[1:] |
| return (loss,) + output if loss is not None else output |
|
|
| return CausalLMOutputWithCrossAttentions( |
| loss=loss, |
| logits=logits, |
| past_key_values=outputs.past_key_values, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| cross_attentions=outputs.cross_attentions, |
| ) |
|
|
| def prepare_inputs_for_generation( |
| self, |
| input_ids, |
| past_key_values=None, |
| use_cache=None, |
| encoder_outputs=None, |
| attention_mask=None, |
| **kwargs, |
| ): |
| if past_key_values is not None: |
| past_length = past_key_values[0][0].shape[2] |
|
|
| # some generation methods already pass only the last input id |
| if input_ids.shape[1] > past_length: |
| remove_prefix_length = past_length |
| else: |
| # default to the old behavior: keep only the final id |
| remove_prefix_length = input_ids.shape[1] - 1 |
|
|
| input_ids = input_ids[:, remove_prefix_length:] |
|
|
| return { |
| "encoder_outputs": encoder_outputs, |
| "past_key_values": past_key_values, |
| "input_ids": input_ids, |
| "use_cache": use_cache, |
| "attention_mask": attention_mask, |
| } |
|
|
| @staticmethod |
| def _reorder_cache(past_key_values, beam_idx): |
| reordered_past = () |
| for layer_past in past_key_values: |
| reordered_past += ( |
| tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), |
| ) |
| return reordered_past |
|
|
|
|
| @add_start_docstrings( |
| """ |
| Whisper Encoder Model with a sequence classification head on top (a linear layer over the pooled output) for tasks |
| like SUPERB Keyword Spotting. |
| """, |
| WHISPER_ENCODER_INPUTS_DOCSTRING, |
| ) |
| class WhisperForAudioClassification(WhisperPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
|
|
| self.encoder = WhisperEncoder(config) |
| num_layers = config.num_hidden_layers + 1 |
| if config.use_weighted_layer_sum: |
| self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) |
| self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) |
| self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) |
|
|
| |
| self.post_init() |
|
|
| def freeze_encoder(self): |
| """ |
| Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will |
| not be updated during training. Only the projection layers and classification head will be updated. |
| """ |
| self.encoder._freeze_parameters() |
|
|
| def get_input_embeddings(self) -> nn.Module: |
| return self.encoder.get_input_embeddings() |
|
|
| def set_input_embeddings(self, value: nn.Module): |
| self.encoder.set_input_embeddings(value) |
|
|
| @add_start_docstrings_to_model_forward(WHISPER_ENCODER_INPUTS_DOCSTRING) |
| @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) |
| def forward( |
| self, |
| input_features: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| labels: Optional[torch.LongTensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., |
| config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If |
| `config.num_labels > 1` a classification loss is computed (Cross-Entropy). |
| |
| Returns: |
| |
| Example: |
| |
| ```python |
| >>> import torch |
| >>> from transformers import AutoFeatureExtractor, WhisperForAudioClassification |
| >>> from datasets import load_dataset |
| |
| >>> feature_extractor = AutoFeatureExtractor.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id") |
| >>> model = WhisperForAudioClassification.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id") |
| |
| >>> ds = load_dataset("google/fleurs", "all", split="validation", streaming=True) |
| >>> sample = next(iter(ds)) |
| |
| >>> inputs = feature_extractor( |
| ... sample["audio"]["array"], sampling_rate=sample["audio"]["sampling_rate"], return_tensors="pt" |
| ... ) |
| >>> input_features = inputs.input_features |
| |
| >>> with torch.no_grad(): |
| ... logits = model(input_features).logits |
| |
| >>> predicted_class_ids = torch.argmax(logits).item() |
| >>> predicted_label = model.config.id2label[predicted_class_ids] |
| >>> predicted_label |
| 'Afrikaans' |
| ```""" |
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| if self.config.use_weighted_layer_sum: |
| output_hidden_states = True |
|
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| if encoder_outputs is None: |
| encoder_outputs = self.encoder( |
| input_features, |
| head_mask=head_mask, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| if self.config.use_weighted_layer_sum: |
| hidden_states = encoder_outputs[_HIDDEN_STATES_START_POSITION] |
| hidden_states = torch.stack(hidden_states, dim=1) |
| norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) |
| hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) |
| else: |
| hidden_states = encoder_outputs[0] |
|
|
| hidden_states = self.projector(hidden_states) |
| pooled_output = hidden_states.mean(dim=1) |
|
|
| logits = self.classifier(pooled_output) |
|
|
| loss = None |
|
|
| if labels is not None: |
| loss_fct = CrossEntropyLoss() |
| # move labels to the same device as the logits |
| labels = labels.to(logits.device) |
| loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) |
|
|
| if not return_dict: |
| output = (logits,) + encoder_outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return SequenceClassifierOutput( |
| loss=loss, |
| logits=logits, |
| hidden_states=encoder_outputs.hidden_states, |
| attentions=encoder_outputs.attentions, |
| ) |
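|
| # Weighted-layer-sum pooling, as used above when `config.use_weighted_layer_sum` is set (names and shapes |
| # illustrative): the per-layer hidden states are stacked, combined with softmax-normalised learned weights, then |
| # projected and mean-pooled over time before the classifier: |
| # |
| #     hidden_states = torch.stack(encoder_hidden_states, dim=1)        # (batch, num_layers, seq_len, hidden) |
| #     weights = torch.softmax(layer_weights, dim=-1).view(-1, 1, 1)    # (num_layers, 1, 1) |
| #     fused = (hidden_states * weights).sum(dim=1)                     # (batch, seq_len, hidden) |
| #     pooled = projector(fused).mean(dim=1)                            # (batch, classifier_proj_size) |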
|
|
|
|
| from dataclasses import dataclass  # required for `ModelOutput` subclasses; normally imported at the top of the file |
|
|
| @dataclass |
| class WhisperOutput(ModelOutput): |
| """ |
| Output of [`AudioWhisperModel`]: the encoder's last hidden state plus optional hidden states, attentions, and |
| the output lengths of each sequence in the batch. |
| """ |
|
| last_hidden_state: torch.FloatTensor = None |
| hidden_states: Optional[Tuple[torch.FloatTensor]] = None |
| attentions: Optional[Tuple[torch.FloatTensor]] = None |
| output_lengths: Optional[torch.LongTensor] = None |
|
|
|
|
|
|
| class AudioWhisperModel(WhisperPreTrainedModel): |
| """ |
| overwrite forward to support attention_mask |
| overwrite from_pretrained to support split encoder parameters from pretrained WhisperModel |
| """ |
| def __init__(self, config: WhisperConfig): |
| super().__init__(config) |
| self.encoder = WhisperEncoder(config) |
|
|
| def forward( |
| self, |
| input_features, |
| attention_mask=None, |
| audio_len_after_cnn=None, |
| head_mask=None, |
| output_attentions=None, |
| output_hidden_states=None, |
| return_dict=None, |
| ): |
|
|
| output = self.encoder( |
| input_features, |
| attention_mask, |
| audio_len_after_cnn, |
| head_mask, |
| output_attentions, |
| output_hidden_states, |
| return_dict |
| ) |
| |
| last_hidden_state = output.last_hidden_state |
| return last_hidden_state |
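|
| # Usage sketch (hypothetical; `audio_len_after_cnn` is specific to the custom encoder in this file, and loading |
| # assumes the split-encoder `from_pretrained` behavior described in the class docstring): |
| # |
| #     audio_model = AudioWhisperModel.from_pretrained("openai/whisper-tiny") |
| #     feats = audio_model(input_features, attention_mask=mask, audio_len_after_cnn=lens)  # (batch, frames, d_model) |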
| |
| |