| # coding=utf-8 |
| # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. |
| # Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved. |
| # |
| # Licensed under the Apache License, Version 2.0 (the "License"); |
| # you may not use this file except in compliance with the License. |
| # You may obtain a copy of the License at |
| # |
| #     http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, software |
| # distributed under the License is distributed on an "AS IS" BASIS, |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| # See the License for the specific language governing permissions and |
| # limitations under the License. |
| """PyTorch OpenAI GPT-2 model.""" |
|
|
| import math |
| import os |
| import warnings |
| from dataclasses import dataclass |
| from typing import Callable, Optional, Tuple, Union |
|
|
| import torch |
| from torch import nn |
| from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss |
|
|
| from transformers.activations import ACT2FN, get_activation |
| from transformers.cache_utils import Cache, DynamicCache, EncoderDecoderCache, StaticCache |
| from transformers.generation import GenerationMixin |
| from transformers.modeling_attn_mask_utils import ( |
| AttentionMaskConverter, |
| _prepare_4d_attention_mask_for_sdpa, |
| ) |
| from transformers.modeling_outputs import ( |
| BaseModelOutputWithPastAndCrossAttentions, |
| CausalLMOutputWithCrossAttentions, |
| QuestionAnsweringModelOutput, |
| SequenceClassifierOutputWithPast, |
| TokenClassifierOutput, |
| ) |
| from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel |
| from transformers.pytorch_utils import ( |
| Conv1D, |
| find_pruneable_heads_and_indices, |
| prune_conv1d_layer, |
| ) |
| from transformers.utils import ( |
| ModelOutput, |
| add_start_docstrings, |
| auto_docstring, |
| logging, |
| ) |
| from transformers.utils.deprecation import deprecate_kwarg |
| from transformers.utils.model_parallel_utils import assert_device_map, get_device_map |
| from transformers.models.gpt2.configuration_gpt2 import GPT2Config |
| from transformers.models.gpt2.modeling_gpt2 import ( |
|     load_tf_weights_in_gpt2, |
|     eager_attention_forward, |
| ) |
|
|
|
|
| logger = logging.get_logger(__name__) |
|
|
|
|
| class GPT2Attention(nn.Module): |
| def __init__(self, config, is_cross_attention=False, layer_idx=None): |
| super().__init__() |
| self.config = config |
| max_positions = config.max_position_embeddings |
| self.register_buffer( |
| "bias", |
| torch.tril( |
| torch.ones((max_positions, max_positions), dtype=torch.bool) |
| ).view(1, 1, max_positions, max_positions), |
| persistent=False, |
| ) |
| self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False) |
|
|
| self.embed_dim = config.hidden_size |
| self.num_heads = config.num_attention_heads |
| self.head_dim = self.embed_dim // self.num_heads |
| self.split_size = self.embed_dim |
| if self.head_dim * self.num_heads != self.embed_dim: |
| raise ValueError( |
| f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" |
| f" {self.num_heads})." |
| ) |
|
|
| self.scale_attn_weights = config.scale_attn_weights |
| self.is_cross_attention = is_cross_attention |
|
|
|         # Layer-wise attention scaling, reordering, and upcasting |
| self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx |
| self.layer_idx = layer_idx |
| self.reorder_and_upcast_attn = config.reorder_and_upcast_attn |
|
|
| if self.is_cross_attention: |
| self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) |
| self.q_attn = Conv1D(self.embed_dim, self.embed_dim) |
| else: |
| self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) |
| self.c_proj = Conv1D(self.embed_dim, self.embed_dim) |
|
|
| self.attn_dropout = nn.Dropout(config.attn_pdrop) |
| self.resid_dropout = nn.Dropout(config.resid_pdrop) |
| self.is_causal = True |
|
|
| self.pruned_heads = set() |
|
|
| def prune_heads(self, heads): |
| if len(heads) == 0: |
| return |
| heads, index = find_pruneable_heads_and_indices( |
| heads, self.num_heads, self.head_dim, self.pruned_heads |
| ) |
| index_attn = torch.cat( |
| [index, index + self.split_size, index + (2 * self.split_size)] |
| ) |
|
|
|         # Prune conv1d layers |
| self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) |
| self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) |
|
|
|         # Update hyper params |
| self.split_size = (self.split_size // self.num_heads) * ( |
| self.num_heads - len(heads) |
| ) |
| self.num_heads = self.num_heads - len(heads) |
| self.pruned_heads = self.pruned_heads.union(heads) |
|
|
| def _upcast_and_reordered_attn( |
| self, query, key, value, attention_mask=None, head_mask=None |
| ): |
|         # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM) |
| bsz, num_heads, q_seq_len, dk = query.size() |
| _, _, k_seq_len, _ = key.size() |
|
|
|         # Preallocate attn_weights for `baddbmm` |
| attn_weights = torch.empty( |
| bsz * num_heads, |
| q_seq_len, |
| k_seq_len, |
| dtype=torch.float32, |
| device=query.device, |
| ) |
|
|
|         # Compute Scale Factor |
| scale_factor = 1.0 |
| if self.scale_attn_weights: |
| scale_factor /= float(value.size(-1)) ** 0.5 |
|
|
| if self.scale_attn_by_inverse_layer_idx: |
| scale_factor /= float(self.layer_idx + 1) |
|
|
|         # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk)) |
| with torch.amp.autocast(query.device.type, enabled=False): |
| q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape( |
| -1, dk, k_seq_len |
| ) |
| attn_weights = torch.baddbmm( |
| attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor |
| ) |
| attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) |
|
|
| if not self.is_cross_attention: |
|             # if only "normal" attention layer implements causal mask |
| query_length, key_length = query.size(-2), key.size(-2) |
| causal_mask = self.bias[ |
| :, :, key_length - query_length : key_length, :key_length |
| ] |
| mask_value = torch.finfo(attn_weights.dtype).min |
|             # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. |
|             # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` |
| mask_value = torch.tensor( |
| mask_value, dtype=attn_weights.dtype, device=attn_weights.device |
| ) |
| attn_weights = torch.where(causal_mask, attn_weights, mask_value) |
|
|
| if attention_mask is not None: |
|             # Apply the attention mask |
| attn_weights = attn_weights + attention_mask |
|
|
| attn_weights = nn.functional.softmax(attn_weights, dim=-1) |
|
|
|         # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- no-op otherwise |
| if attn_weights.dtype != torch.float32: |
| raise RuntimeError( |
| "Error with upcasting, attn_weights does not have dtype torch.float32" |
| ) |
| attn_weights = attn_weights.type(value.dtype) |
| attn_weights = self.attn_dropout(attn_weights) |
|
|
|         # Mask heads if we want to |
| if head_mask is not None: |
| attn_weights = attn_weights * head_mask |
|
|
| attn_output = torch.matmul(attn_weights, value) |
| attn_output = attn_output.transpose(1, 2) |
|
|
| return attn_output, attn_weights |
|
|
| @deprecate_kwarg( |
| "layer_past", |
| new_name="past_key_value", |
| version="4.53.0", |
| raise_if_both_names=True, |
| ) |
| def forward( |
| self, |
| hidden_states: Optional[Tuple[torch.FloatTensor]], |
| past_key_value: Optional[Cache] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| encoder_hidden_states: Optional[torch.Tensor] = None, |
| encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| output_attentions: Optional[bool] = False, |
| **kwargs, |
| ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: |
| is_cross_attention = encoder_hidden_states is not None |
| if is_cross_attention: |
| if not hasattr(self, "q_attn"): |
| raise ValueError( |
| "If class is used as cross attention, the weights `q_attn` have to be defined. " |
| "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." |
| ) |
|
|
| query_states = self.q_attn(hidden_states) |
| key_states, value_states = self.c_attn(encoder_hidden_states).split( |
| self.split_size, dim=2 |
| ) |
| attention_mask = encoder_attention_mask |
| else: |
| query_states, key_states, value_states = self.c_attn(hidden_states).split( |
| self.split_size, dim=2 |
| ) |
|
|
|         shape_q = (query_states.shape[0], query_states.shape[1], -1, self.head_dim) |
|         shape_kv = (key_states.shape[0], key_states.shape[1], -1, self.head_dim) |
|
|
| query_states = query_states.view(shape_q).transpose(1, 2) |
| key_states = key_states.view(shape_kv).transpose(1, 2) |
| value_states = value_states.view(shape_kv).transpose(1, 2) |
|
|
| if past_key_value is not None: |
| if isinstance(past_key_value, EncoderDecoderCache): |
| if is_cross_attention: |
| past_key_value = past_key_value.cross_attention_cache |
| else: |
| past_key_value = past_key_value.self_attention_cache |
| cache_kwargs = {"cache_position": cache_position} |
| key_states, value_states = past_key_value.update( |
| key_states, value_states, self.layer_idx, cache_kwargs=cache_kwargs |
| ) |
|
|
| is_causal = ( |
| attention_mask is None |
| and query_states.shape[-2] > 1 |
| and not is_cross_attention |
| ) |
|
|
| using_eager = self.config._attn_implementation == "eager" |
| attention_interface: Callable = eager_attention_forward |
| if self.config._attn_implementation != "eager": |
| if self.config._attn_implementation == "sdpa" and ( |
| output_attentions or head_mask is not None |
| ): |
| using_eager = True |
| logger.warning_once( |
| "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " |
| 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' |
| ) |
| else: |
|                 # Dispatch to the configured non-eager backend (e.g. "sdpa" or |
|                 # "flash_attention_2") registered in ALL_ATTENTION_FUNCTIONS. |
| attention_interface = ALL_ATTENTION_FUNCTIONS[ |
| self.config._attn_implementation |
| ] |
|
|
| if using_eager and self.reorder_and_upcast_attn: |
| attn_output, attn_weights = self._upcast_and_reordered_attn( |
| query_states, key_states, value_states, attention_mask, head_mask |
| ) |
| else: |
| attn_output, attn_weights = attention_interface( |
| self, |
| query_states, |
| key_states, |
| value_states, |
| attention_mask, |
| head_mask=head_mask, |
| dropout=self.attn_dropout.p if self.training else 0.0, |
| is_causal=is_causal, |
| **kwargs, |
| ) |
|
|
|         attn_output = attn_output.reshape(attn_output.shape[0], attn_output.shape[1], -1).contiguous() |
| attn_output = self.c_proj(attn_output) |
| attn_output = self.resid_dropout(attn_output) |
|
|
| return attn_output, attn_weights |
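| |
| |
| # Illustrative sketch (not part of the original modeling code): traces the |
| # tensor shapes produced by the Q/K/V projection and head split performed in |
| # `GPT2Attention.forward` above. All sizes below are example assumptions. |
| def _demo_gpt2_attention_head_split(): |
|     batch, seq_len, embed_dim, num_heads = 2, 5, 768, 12 |
|     head_dim = embed_dim // num_heads |
|     # `c_attn` maps (batch, seq, embed_dim) -> (batch, seq, 3 * embed_dim); |
|     # `.split(embed_dim, dim=2)` then yields query/key/value. |
|     projected = torch.randn(batch, seq_len, 3 * embed_dim) |
|     query, key, value = projected.split(embed_dim, dim=2) |
|     # view + transpose gives the (batch, num_heads, seq, head_dim) layout |
|     # expected by the attention kernels. |
|     query = query.view(batch, seq_len, num_heads, head_dim).transpose(1, 2) |
|     assert query.shape == (batch, num_heads, seq_len, head_dim) |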
|
|
|
|
| class GPT2MLP(nn.Module): |
| def __init__(self, intermediate_size, config): |
| super().__init__() |
| embed_dim = config.hidden_size |
| self.c_fc = Conv1D(intermediate_size, embed_dim) |
| self.c_proj = Conv1D(embed_dim, intermediate_size) |
| self.act = ACT2FN[config.activation_function] |
| self.dropout = nn.Dropout(config.resid_pdrop) |
|
|
| def forward( |
| self, hidden_states: Optional[Tuple[torch.FloatTensor]] |
| ) -> torch.FloatTensor: |
| hidden_states = self.c_fc(hidden_states) |
| hidden_states = self.act(hidden_states) |
| hidden_states = self.c_proj(hidden_states) |
| hidden_states = self.dropout(hidden_states) |
| return hidden_states |
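| |
| |
| # Hedged aside (demo name and sizes are mine): `Conv1D(nf, nx)` from |
| # `pytorch_utils` behaves like `nn.Linear(nx, nf)` with a transposed weight |
| # layout, a GPT-2 convention inherited from the original TF checkpoint. |
| def _demo_conv1d_is_a_linear_layer(): |
|     nx, nf = 4, 8 |
|     conv = Conv1D(nf, nx) |
|     linear = nn.Linear(nx, nf) |
|     # Copy parameters so both modules compute the same affine map. |
|     with torch.no_grad(): |
|         linear.weight.copy_(conv.weight.t()) |
|         linear.bias.copy_(conv.bias) |
|     x = torch.randn(2, 3, nx) |
|     assert torch.allclose(conv(x), linear(x), atol=1e-6) |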
|
|
|
|
| class GPT2Block(nn.Module): |
| def __init__(self, config, layer_idx=None): |
| super().__init__() |
| hidden_size = config.hidden_size |
| inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size |
|
|
| self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) |
| self.attn = GPT2Attention(config=config, layer_idx=layer_idx) |
| self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) |
|
|
| if config.add_cross_attention: |
| self.crossattention = GPT2Attention( |
| config=config, is_cross_attention=True, layer_idx=layer_idx |
| ) |
| self.ln_cross_attn = nn.LayerNorm( |
| hidden_size, eps=config.layer_norm_epsilon |
| ) |
|
|
| self.mlp = GPT2MLP(inner_dim, config) |
|
|
| @deprecate_kwarg( |
| "layer_past", |
| new_name="past_key_value", |
| version="4.53.0", |
| raise_if_both_names=True, |
| ) |
| def forward( |
| self, |
| hidden_states: Optional[Tuple[torch.FloatTensor]], |
| past_key_value: Optional[Cache] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| encoder_hidden_states: Optional[torch.Tensor] = None, |
| encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = False, |
| output_attentions: Optional[bool] = False, |
| **kwargs, |
| ) -> Union[ |
| Tuple[torch.Tensor], |
| Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]], |
| ]: |
| residual = hidden_states |
| hidden_states = self.ln_1(hidden_states) |
| attn_output, self_attn_weights = self.attn( |
| hidden_states, |
| past_key_value=past_key_value, |
| cache_position=cache_position, |
| attention_mask=attention_mask, |
| head_mask=head_mask, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| **kwargs, |
| ) |
|         # residual connection |
| hidden_states = attn_output + residual |
|
|
| if encoder_hidden_states is not None: |
|             # add one self-attention block for cross-attention |
| if not hasattr(self, "crossattention"): |
| raise ValueError( |
| f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " |
| "cross-attention layers by setting `config.add_cross_attention=True`" |
| ) |
| residual = hidden_states |
| hidden_states = self.ln_cross_attn(hidden_states) |
| cross_attn_output, cross_attn_weights = self.crossattention( |
| hidden_states, |
| past_key_value=past_key_value, |
| attention_mask=attention_mask, |
| head_mask=head_mask, |
| encoder_hidden_states=encoder_hidden_states, |
| encoder_attention_mask=encoder_attention_mask, |
| output_attentions=output_attentions, |
| ) |
|             # residual connection |
| hidden_states = residual + cross_attn_output |
|
|
| residual = hidden_states |
| hidden_states = self.ln_2(hidden_states) |
| feed_forward_hidden_states = self.mlp(hidden_states) |
|         # residual connection |
| hidden_states = residual + feed_forward_hidden_states |
|
|
| outputs = (hidden_states,) |
| if output_attentions: |
| outputs += (self_attn_weights,) |
| if encoder_hidden_states is not None: |
| outputs += (cross_attn_weights,) |
|
|
| return outputs |
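| |
| |
| # Illustrative sketch (toy modules assumed, not the real block): the |
| # pre-LayerNorm residual pattern used by `GPT2Block.forward` above, i.e. |
| #   x = x + attn(ln_1(x));  x = x + mlp(ln_2(x)) |
| def _demo_pre_layernorm_residual_pattern(): |
|     hidden_size = 8 |
|     ln = nn.LayerNorm(hidden_size) |
|     sublayer = nn.Linear(hidden_size, hidden_size)  # stand-in for attn/mlp |
|     x = torch.randn(2, 5, hidden_size) |
|     # The sublayer sees normalized inputs, but the residual stream itself is |
|     # only normalized at the very end, by `ln_f` in `GPT2Model`. |
|     x = x + sublayer(ln(x)) |
|     assert x.shape == (2, 5, hidden_size) |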
|
|
|
|
| |
| class GPT2SequenceSummary(nn.Module): |
| r""" |
|     Compute a single-vector summary of a sequence's hidden states. |
| |
| Args: |
| config ([`GPT2Config`]): |
| The config used by the model. Relevant arguments in the config class of the model are (refer to the actual |
| config class of your model for the default values it uses): |
| |
| - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: |
| |
| - `"last"` -- Take the last token hidden state (like XLNet) |
|                 - `"first"` -- Take the first token hidden state (like BERT) |
| - `"mean"` -- Take the mean of all tokens hidden states |
| - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) |
|                 - `"attn"` -- Not implemented for now, would use multi-head attention |
| |
| - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. |
| - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes |
| (otherwise to `config.hidden_size`). |
| - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, |
| another string or `None` will add no activation. |
| - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. |
|             - **summary_last_dropout** (`float`) -- Optional dropout probability after the projection and activation. |
| """ |
|
|
| def __init__(self, config: GPT2Config): |
| super().__init__() |
|
|
| self.summary_type = getattr(config, "summary_type", "last") |
| if self.summary_type == "attn": |
|             # We should use a standard multi-head attention module with absolute positional embedding for that. |
|             # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 |
|             # We can probably just use the multi-head attention module of PyTorch >=1.1.0 |
| raise NotImplementedError |
|
|
| self.summary = nn.Identity() |
| if hasattr(config, "summary_use_proj") and config.summary_use_proj: |
| if ( |
| hasattr(config, "summary_proj_to_labels") |
| and config.summary_proj_to_labels |
| and config.num_labels > 0 |
| ): |
| num_classes = config.num_labels |
| else: |
| num_classes = config.hidden_size |
| self.summary = nn.Linear(config.hidden_size, num_classes) |
|
|
| activation_string = getattr(config, "summary_activation", None) |
| self.activation: Callable = ( |
| get_activation(activation_string) if activation_string else nn.Identity() |
| ) |
|
|
| self.first_dropout = nn.Identity() |
| if ( |
| hasattr(config, "summary_first_dropout") |
| and config.summary_first_dropout > 0 |
| ): |
| self.first_dropout = nn.Dropout(config.summary_first_dropout) |
|
|
| self.last_dropout = nn.Identity() |
| if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: |
| self.last_dropout = nn.Dropout(config.summary_last_dropout) |
|
|
| def forward( |
| self, |
| hidden_states: torch.FloatTensor, |
| cls_index: Optional[torch.LongTensor] = None, |
| ) -> torch.FloatTensor: |
| """ |
|         Compute a single-vector summary of a sequence's hidden states. |
| |
| Args: |
| hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): |
| The hidden states of the last layer. |
|             cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): |
|                 Position of the classification token for each element of the batch, used when `summary_type == "cls_index"`. If `None`, the last token of each sequence is taken as the classification token. |
| |
| Returns: |
| `torch.FloatTensor`: The summary of the sequence hidden states. |
| """ |
| if self.summary_type == "last": |
| output = hidden_states[:, -1] |
| elif self.summary_type == "first": |
| output = hidden_states[:, 0] |
| elif self.summary_type == "mean": |
| output = hidden_states.mean(dim=1) |
| elif self.summary_type == "cls_index": |
| if cls_index is None: |
| cls_index = torch.full_like( |
| hidden_states[..., :1, :], |
| hidden_states.shape[-2] - 1, |
| dtype=torch.long, |
| ) |
| else: |
| cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) |
| cls_index = cls_index.expand( |
| (-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),) |
| ) |
|             # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dims of hidden_states |
|             output = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, XX, hidden_size) |
| elif self.summary_type == "attn": |
| raise NotImplementedError |
|
|
| output = self.first_dropout(output) |
| output = self.summary(output) |
| output = self.activation(output) |
| output = self.last_dropout(output) |
|
|
| return output |
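| |
| |
| # Worked example (toy sizes assumed) of the `"cls_index"` gather above: for |
| # each batch row we pick the hidden state at the given token position. |
| def _demo_cls_index_gather(): |
|     batch, seq_len, hidden = 2, 4, 3 |
|     hidden_states = torch.arange(batch * seq_len * hidden, dtype=torch.float).view(batch, seq_len, hidden) |
|     cls_index = torch.tensor([1, 3])  # classification token position per row |
|     idx = cls_index.unsqueeze(-1).unsqueeze(-1).expand(-1, 1, hidden) |
|     picked = hidden_states.gather(-2, idx).squeeze(-2)  # (batch, hidden) |
|     assert torch.equal(picked[0], hidden_states[0, 1]) |
|     assert torch.equal(picked[1], hidden_states[1, 3]) |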
|
|
|
|
| @auto_docstring |
| class GPT2PreTrainedModel(PreTrainedModel): |
| config_class = GPT2Config |
| load_tf_weights = load_tf_weights_in_gpt2 |
| base_model_prefix = "transformer" |
| is_parallelizable = True |
| supports_gradient_checkpointing = True |
| _no_split_modules = ["GPT2Block"] |
| _skip_keys_device_placement = "past_key_values" |
| _supports_flash_attn_2 = True |
| _supports_sdpa = True |
| _supports_attention_backend = True |
| _supports_cache_class = True |
| _supports_static_cache = True |
|
|
| def __init__(self, *inputs, **kwargs): |
| super().__init__(*inputs, **kwargs) |
|
|
| def _init_weights(self, module): |
| """Initialize the weights.""" |
| if isinstance(module, (nn.Linear, Conv1D)): |
|             # Slightly different from the TF version which uses truncated_normal for initialization |
|             # cf https://github.com/pytorch/pytorch/pull/5617 |
| module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| if module.bias is not None: |
| module.bias.data.zero_() |
| elif isinstance(module, nn.Embedding): |
| module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| if module.padding_idx is not None: |
| module.weight.data[module.padding_idx].zero_() |
| elif isinstance(module, nn.LayerNorm): |
| module.bias.data.zero_() |
| module.weight.data.fill_(1.0) |
|
|
|         # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: |
|         #   > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale |
|         #   > the weights of residual layers at initialization by a factor of 1/sqrt(N) where N is the # of residual layers. |
|         #   >   -- GPT-2 :: https://openai.com/blog/better-language-models/ |
|         # |
|         # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py |
| for name, p in module.named_parameters(): |
| if name == "c_proj.weight": |
|                 # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block |
| p.data.normal_( |
| mean=0.0, |
| std=( |
| self.config.initializer_range |
| / math.sqrt(2 * self.config.n_layer) |
| ), |
| ) |
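| |
| |
| # Worked example of the residual-scaling rule applied above: for a 12-layer |
| # model with `initializer_range=0.02` (the gpt2 defaults), every |
| # `c_proj.weight` is drawn with std 0.02 / sqrt(2 * 12) ~= 0.0041. |
| def _demo_residual_init_std(initializer_range: float = 0.02, n_layer: int = 12) -> float: |
|     # There are 2 residual additions (attention + MLP) per block, hence 2 * n_layer. |
|     return initializer_range / math.sqrt(2 * n_layer) |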
|
|
|
|
| @dataclass |
| class GPT2DoubleHeadsModelOutput(ModelOutput): |
| """ |
| Base class for outputs of models predicting if two sentences are consecutive or not. |
| |
| Args: |
| loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): |
| Language modeling loss. |
| mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided): |
| Multiple choice classification loss. |
| logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`): |
| Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
| mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): |
| Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). |
| past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): |
| Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads, |
| sequence_length, embed_size_per_head)`). |
| |
| Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see |
| `past_key_values` input) to speed up sequential decoding. |
| hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): |
| Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of |
| shape `(batch_size, sequence_length, hidden_size)`. |
| |
| Hidden-states of the model at the output of each layer plus the initial embedding outputs. |
| attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): |
| Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, |
| sequence_length)`. |
| |
| GPT2Attentions weights after the attention softmax, used to compute the weighted average in the |
| self-attention heads. |
| """ |
|
|
| loss: Optional[torch.FloatTensor] = None |
| mc_loss: Optional[torch.FloatTensor] = None |
| logits: Optional[torch.FloatTensor] = None |
| mc_logits: Optional[torch.FloatTensor] = None |
| past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None |
| hidden_states: Optional[Tuple[torch.FloatTensor]] = None |
| attentions: Optional[Tuple[torch.FloatTensor]] = None |
|
|
|
|
| PARALLELIZE_DOCSTRING = r""" |
|     This is an experimental feature and is subject to change at a moment's notice. |
| |
| Uses a device map to distribute attention modules of the model across several devices. If no device map is given, |
| it will evenly distribute blocks across all devices. |
| |
| Args: |
| device_map (`Dict[int, list]`, *optional*): |
| A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always |
| automatically mapped to the first device (for esoteric reasons). That means that the first device should |
| have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the |
| following number of attention modules: |
| |
| - openai-community/gpt2: 12 |
| - openai-community/gpt2-medium: 24 |
| - openai-community/gpt2-large: 36 |
| - openai-community/gpt2-xl: 48 |
| |
| Example: |
| |
| ```python |
| # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules: |
| model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-xl") |
| device_map = { |
| 0: [0, 1, 2, 3, 4, 5, 6, 7, 8], |
| 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], |
| 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34], |
| 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47], |
| } |
| model.parallelize(device_map) |
| ``` |
| """ |
| DEPARALLELIZE_DOCSTRING = r""" |
| Moves the model to cpu from a model parallel state. |
| |
| Example: |
| |
| ```python |
| # On a 4 GPU machine with openai-community/gpt2-large: |
| model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-large") |
| device_map = { |
| 0: [0, 1, 2, 3, 4, 5, 6, 7], |
| 1: [8, 9, 10, 11, 12, 13, 14, 15], |
| 2: [16, 17, 18, 19, 20, 21, 22, 23], |
| 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], |
| } |
| model.parallelize(device_map) # Splits the model across several devices |
|     model.deparallelize()  # Puts the model back on CPU and cleans memory by calling torch.cuda.empty_cache() |
| ``` |
| """ |
|
|
|
|
| @auto_docstring |
| class GPT2Model(GPT2PreTrainedModel): |
| _supports_param_buffer_assignment = False |
|
|
| def __init__(self, config): |
| super().__init__(config) |
|
|
| self.embed_dim = config.hidden_size |
|
|
| self.wte = nn.Embedding(config.vocab_size, self.embed_dim) |
| self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) |
|
|
| self.drop = nn.Dropout(config.embd_pdrop) |
| self.h = nn.ModuleList( |
| [GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)] |
| ) |
| self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) |
|
|
|         # Model parallel |
| self.model_parallel = False |
| self.device_map = None |
| self.gradient_checkpointing = False |
| self._attn_implementation = config._attn_implementation |
|
|
|         # Initialize weights and apply final processing |
| self.post_init() |
|
|
| @add_start_docstrings(PARALLELIZE_DOCSTRING) |
| def parallelize(self, device_map=None): |
|         # Check validity of device_map |
| warnings.warn( |
| "`GPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your" |
| " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" |
| " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1," |
| " ...}", |
| FutureWarning, |
| ) |
| self.device_map = ( |
| get_device_map(len(self.h), range(torch.cuda.device_count())) |
| if device_map is None |
| else device_map |
| ) |
| assert_device_map(self.device_map, len(self.h)) |
| self.model_parallel = True |
| self.first_device = ( |
| "cpu" |
| if "cpu" in self.device_map.keys() |
| else "cuda:" + str(min(self.device_map.keys())) |
| ) |
| self.last_device = "cuda:" + str(max(self.device_map.keys())) |
| self.wte = self.wte.to(self.first_device) |
| self.wpe = self.wpe.to(self.first_device) |
|         # Load onto devices |
| for k, v in self.device_map.items(): |
| for block in v: |
| cuda_device = "cuda:" + str(k) |
| self.h[block] = self.h[block].to(cuda_device) |
|         # ln_f to last |
| self.ln_f = self.ln_f.to(self.last_device) |
|
|
| @add_start_docstrings(DEPARALLELIZE_DOCSTRING) |
| def deparallelize(self): |
| warnings.warn( |
| "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", |
| FutureWarning, |
| ) |
| self.model_parallel = False |
| self.device_map = None |
| self.first_device = "cpu" |
| self.last_device = "cpu" |
| self.wte = self.wte.to("cpu") |
| self.wpe = self.wpe.to("cpu") |
| for index in range(len(self.h)): |
| self.h[index] = self.h[index].to("cpu") |
| self.ln_f = self.ln_f.to("cpu") |
| torch.cuda.empty_cache() |
|
|
| def get_input_embeddings(self): |
| return self.wte |
|
|
| def set_input_embeddings(self, new_embeddings): |
| self.wte = new_embeddings |
|
|
| def _prune_heads(self, heads_to_prune): |
| """ |
| Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} |
| """ |
| for layer, heads in heads_to_prune.items(): |
| self.h[layer].attn.prune_heads(heads) |
|
|
| @auto_docstring |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Union[Tuple[Tuple[torch.Tensor]], Cache]] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| encoder_hidden_states: Optional[torch.Tensor] = None, |
| encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| **kwargs, |
| ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: |
| r""" |
| input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): |
| `input_ids_length` = `sequence_length` if `past_key_values` is `None` else |
| `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input |
| sequence tokens in the vocabulary. |
| |
| If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as |
| `input_ids`. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| """ |
| output_attentions = ( |
| output_attentions |
| if output_attentions is not None |
| else self.config.output_attentions |
| ) |
| output_hidden_states = ( |
| output_hidden_states |
| if output_hidden_states is not None |
| else self.config.output_hidden_states |
| ) |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| if input_ids is not None and inputs_embeds is not None: |
| raise ValueError( |
| "You cannot specify both input_ids and inputs_embeds at the same time" |
| ) |
| elif input_ids is not None: |
| self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) |
| input_shape = input_ids.size() |
| input_ids = input_ids.view(-1, input_shape[-1]) |
| batch_size = input_ids.shape[0] |
| elif inputs_embeds is not None: |
| input_shape = inputs_embeds.size()[:-1] |
| batch_size = inputs_embeds.shape[0] |
| else: |
| raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
| device = input_ids.device if input_ids is not None else inputs_embeds.device |
|
|
| if token_type_ids is not None: |
| token_type_ids = token_type_ids.view(-1, input_shape[-1]) |
|
|
| if self.gradient_checkpointing and self.training: |
| if use_cache: |
| logger.warning_once( |
| "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." |
| ) |
| use_cache = False |
|
|
| |
| return_legacy_cache = False |
| if use_cache: |
| if past_key_values is None: |
| return_legacy_cache = True |
| past_key_values = DynamicCache() |
| elif not isinstance(past_key_values, Cache): |
| return_legacy_cache = True |
| logger.warning_once( |
| "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.53.0. " |
| "You should pass an instance of `Cache` instead, e.g. " |
| "`past_key_values=DynamicCache.from_legacy_cache(past_key_values)`." |
| ) |
| past_key_values = DynamicCache.from_legacy_cache(past_key_values) |
|
|
| if self.config.add_cross_attention and not isinstance( |
| past_key_values, EncoderDecoderCache |
| ): |
| past_key_values = EncoderDecoderCache(past_key_values, DynamicCache()) |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.wte(input_ids) |
|
|
| if cache_position is None: |
| past_seen_tokens = ( |
| past_key_values.get_seq_length() if past_key_values is not None else 0 |
| ) |
| cache_position = torch.arange( |
| past_seen_tokens, |
| past_seen_tokens + inputs_embeds.shape[1], |
| device=inputs_embeds.device, |
| ) |
| if position_ids is None: |
| position_ids = cache_position.unsqueeze(0) |
|
|
| position_embeds = self.wpe(position_ids) |
| hidden_states = inputs_embeds + position_embeds.to(inputs_embeds.device) |
|
|
|         # Attention mask. |
| if attention_mask is not None and attention_mask.ndim < 4: |
| attention_mask = attention_mask.view(batch_size, -1) |
| causal_mask = self._update_causal_mask( |
| attention_mask, |
| inputs_embeds, |
| cache_position, |
| past_key_values, |
| output_attentions, |
| ) |
|
|
|         # If a 2D or 3D attention mask is provided for the cross-attention, |
|         # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]. |
| _use_sdpa = ( |
| self._attn_implementation == "sdpa" |
| and output_attentions is False |
| and head_mask is None |
| ) |
| if self.config.add_cross_attention and encoder_hidden_states is not None: |
| encoder_batch_size, encoder_sequence_length, _ = ( |
| encoder_hidden_states.size() |
| ) |
| encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) |
| if encoder_attention_mask is None: |
| encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) |
| if _use_sdpa: |
| encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa( |
| mask=encoder_attention_mask, |
| dtype=inputs_embeds.dtype, |
| tgt_len=input_shape[-1], |
| ) |
| elif not self._attn_implementation == "flash_attention_2": |
| encoder_attention_mask = self.invert_attention_mask( |
| encoder_attention_mask |
| ) |
| else: |
| encoder_attention_mask = None |
|
|
|         # Prepare head mask if needed |
|         # 1.0 in head_mask indicate we keep the head |
|         # attention_probs has shape bsz x n_heads x N x N |
|         # head_mask has shape n_layer x batch x n_heads x N x N |
| head_mask = self.get_head_mask(head_mask, self.config.n_layer) |
|
|
| if token_type_ids is not None: |
| token_type_embeds = self.wte(token_type_ids) |
| hidden_states = hidden_states + token_type_embeds |
|
|
| hidden_states = self.drop(hidden_states) |
|
|
| output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) |
|
|
| all_self_attentions = () if output_attentions else None |
| all_cross_attentions = ( |
| () if output_attentions and self.config.add_cross_attention else None |
| ) |
| all_hidden_states = () if output_hidden_states else None |
| for i, block in enumerate(self.h): |
|             # Model parallel |
| if self.model_parallel: |
| torch.cuda.set_device(hidden_states.device) |
|                 # Ensure that attention_mask is always on the same device as hidden_states |
| if attention_mask is not None: |
| attention_mask = attention_mask.to(hidden_states.device) |
| if isinstance(head_mask, torch.Tensor): |
| head_mask = head_mask.to(hidden_states.device) |
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
| if self.gradient_checkpointing and self.training: |
| outputs = self._gradient_checkpointing_func( |
| block.__call__, |
| hidden_states, |
| past_key_values, |
| cache_position, |
| causal_mask, |
| head_mask[i], |
| encoder_hidden_states, |
| encoder_attention_mask, |
| use_cache, |
| output_attentions, |
| ) |
| else: |
| outputs = block( |
| hidden_states, |
| past_key_value=past_key_values, |
| cache_position=cache_position, |
| attention_mask=causal_mask, |
| head_mask=head_mask[i], |
| encoder_hidden_states=encoder_hidden_states, |
| encoder_attention_mask=encoder_attention_mask, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| **kwargs, |
| ) |
|
|
| hidden_states = outputs[0] |
|
|
| if output_attentions: |
| all_self_attentions = all_self_attentions + (outputs[1],) |
| if self.config.add_cross_attention: |
| all_cross_attentions = all_cross_attentions + (outputs[2],) |
|
|
|             # Model Parallel: If it's the last layer for that device, put things on the next device |
| if self.model_parallel: |
| for k, v in self.device_map.items(): |
| if i == v[-1] and "cuda:" + str(k) != self.last_device: |
| hidden_states = hidden_states.to("cuda:" + str(k + 1)) |
|
|
| hidden_states = self.ln_f(hidden_states) |
|
|
| hidden_states = hidden_states.view(output_shape) |
|         # Add last hidden state |
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
| past_key_values = past_key_values if use_cache else None |
| if return_legacy_cache: |
| past_key_values = ( |
| past_key_values.self_attention_cache.to_legacy_cache() |
| if self.config.add_cross_attention |
| else past_key_values.to_legacy_cache() |
| ) |
| if not return_dict: |
| return tuple( |
| v |
| for v in [ |
| hidden_states, |
| past_key_values, |
| all_hidden_states, |
| all_self_attentions, |
| all_cross_attentions, |
| ] |
| if v is not None |
| ) |
|
|
| return BaseModelOutputWithPastAndCrossAttentions( |
| last_hidden_state=hidden_states, |
| past_key_values=past_key_values, |
| hidden_states=all_hidden_states, |
| attentions=all_self_attentions, |
| cross_attentions=all_cross_attentions, |
| ) |
|
|
| def _update_causal_mask( |
| self, |
| attention_mask: torch.Tensor, |
| input_tensor: torch.Tensor, |
| cache_position: torch.Tensor, |
| past_key_values: Cache, |
| output_attentions: bool, |
| ): |
| if self.config._attn_implementation == "flash_attention_2": |
| if attention_mask is not None and 0.0 in attention_mask: |
| return attention_mask |
| return None |
|
|
|         # For SDPA, when possible, we rely on its `is_causal` argument instead of its `attn_mask` argument, in |
|         # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail |
|         # to infer the attention mask. |
| past_seen_tokens = ( |
| past_key_values.get_seq_length() if past_key_values is not None else 0 |
| ) |
| using_static_cache = isinstance(past_key_values, StaticCache) |
|
|
|         # SDPA can often run with `is_causal=True` and no explicit mask; check whether the mask can be dropped. |
| if ( |
| self.config._attn_implementation == "sdpa" |
| and not using_static_cache |
| and not output_attentions |
| ): |
| if AttentionMaskConverter._ignore_causal_mask_sdpa( |
| attention_mask, |
| inputs_embeds=input_tensor, |
| past_key_values_length=past_seen_tokens, |
| is_training=self.training, |
| ): |
| return None |
|
|
| dtype = input_tensor.dtype |
| sequence_length = input_tensor.shape[1] |
| if using_static_cache: |
| target_length = past_key_values.get_max_cache_shape() |
| else: |
| target_length = ( |
| attention_mask.shape[-1] |
| if isinstance(attention_mask, torch.Tensor) |
| else past_seen_tokens + sequence_length + 1 |
| ) |
|
|
|         # In case a 2D attention mask is provided, build the full 4D causal mask here. |
| causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( |
| attention_mask, |
| sequence_length=sequence_length, |
| target_length=target_length, |
| dtype=dtype, |
| cache_position=cache_position, |
| batch_size=input_tensor.shape[0], |
| ) |
|
|
| if ( |
| self.config._attn_implementation == "sdpa" |
| and attention_mask is not None |
| and attention_mask.device.type == "cuda" |
| and not output_attentions |
| ): |
|             # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when |
|             # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. |
|             # Details: https://github.com/pytorch/pytorch/issues/110213 |
| min_dtype = torch.finfo(dtype).min |
| causal_mask = AttentionMaskConverter._unmask_unattended( |
| causal_mask, min_dtype |
| ) |
|
|
| return causal_mask |
|
|
| @staticmethod |
| def _prepare_4d_causal_attention_mask_with_cache_position( |
| attention_mask: torch.Tensor, |
| sequence_length: int, |
| target_length: int, |
| dtype: torch.dtype, |
| cache_position: torch.Tensor, |
| batch_size: int, |
| **kwargs, |
| ): |
| """ |
| Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape |
| `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. |
| |
| Args: |
| attention_mask (`torch.Tensor`): |
| A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape |
| `(batch_size, 1, query_length, key_value_length)`. |
| sequence_length (`int`): |
| The sequence length being processed. |
| target_length (`int`): |
| The target length: when generating with static cache, the mask should be as long as the static cache, |
| to account for the 0 padding, the part of the cache that is not filled yet. |
| dtype (`torch.dtype`): |
| The dtype to use for the 4D attention mask. |
| cache_position (`torch.Tensor`): |
| Indices depicting the position of the input sequence tokens in the sequence. |
|             batch_size (`int`): |
| Batch size. |
| """ |
| if attention_mask is not None and attention_mask.dim() == 4: |
|             # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. |
| causal_mask = attention_mask |
| else: |
| min_dtype = torch.finfo(dtype).min |
| causal_mask = torch.full( |
| (sequence_length, target_length), |
| fill_value=min_dtype, |
| dtype=dtype, |
| device=cache_position.device, |
| ) |
| if sequence_length != 1: |
| causal_mask = torch.triu(causal_mask, diagonal=1) |
| causal_mask *= torch.arange( |
| target_length, device=cache_position.device |
| ) > cache_position.reshape(-1, 1) |
| causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) |
| if attention_mask is not None: |
|                 causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit |
| mask_length = attention_mask.shape[-1] |
| padding_mask = ( |
| causal_mask[:, :, :, :mask_length] |
| + attention_mask[:, None, None, :] |
| ) |
| padding_mask = padding_mask == 0 |
| causal_mask[:, :, :, :mask_length] = causal_mask[ |
| :, :, :, :mask_length |
| ].masked_fill(padding_mask, min_dtype) |
|
|
| return causal_mask |
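| |
| |
| # Hedged sketch of what `_prepare_4d_causal_attention_mask_with_cache_position` |
| # produces for a toy prefill step (no cache, one left-padded row). The sizes |
| # and the float16 dtype are example assumptions. |
| def _demo_4d_causal_mask(): |
|     attention_mask = torch.tensor([[1, 1, 1], [0, 1, 1]])  # row 2 is left-padded |
|     cache_position = torch.arange(3)  # prefill: query positions 0..2 |
|     mask = GPT2Model._prepare_4d_causal_attention_mask_with_cache_position( |
|         attention_mask, |
|         sequence_length=3, |
|         target_length=3, |
|         dtype=torch.float16, |
|         cache_position=cache_position, |
|         batch_size=2, |
|     ) |
|     # (batch, 1, query_len, kv_len); 0.0 where attending, dtype-min elsewhere. |
|     assert mask.shape == (2, 1, 3, 3) |
|     assert mask[0, 0, 2, 2] == 0.0 |
|     assert mask[0, 0, 0, 1] == torch.finfo(torch.float16).min |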
|
|
|
|
| @auto_docstring( |
| custom_intro=""" |
| The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input |
| embeddings). |
| """ |
| ) |
| class GPT2LMHeadModel(GPT2PreTrainedModel, GenerationMixin): |
| _tied_weights_keys = ["lm_head.weight"] |
|
|
| def __init__(self, config): |
| super().__init__(config) |
| self.transformer = GPT2Model(config) |
| self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) |
|
|
|         # Model parallel |
| self.model_parallel = False |
| self.device_map = None |
|
|
|         # Initialize weights and apply final processing |
| self.post_init() |
|
|
| @add_start_docstrings(PARALLELIZE_DOCSTRING) |
| def parallelize(self, device_map=None): |
| warnings.warn( |
| "`GPT2LMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load" |
| " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" |
| " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':" |
| " 0, 'transformer.h.1': 1, ...}", |
| FutureWarning, |
| ) |
| self.device_map = ( |
| get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) |
| if device_map is None |
| else device_map |
| ) |
| assert_device_map(self.device_map, len(self.transformer.h)) |
| self.transformer.parallelize(self.device_map) |
| self.lm_head = self.lm_head.to(self.transformer.first_device) |
| self.model_parallel = True |
|
|
| @add_start_docstrings(DEPARALLELIZE_DOCSTRING) |
| def deparallelize(self): |
| warnings.warn( |
| "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", |
| FutureWarning, |
| ) |
| self.transformer.deparallelize() |
| self.transformer = self.transformer.to("cpu") |
| self.lm_head = self.lm_head.to("cpu") |
| self.model_parallel = False |
| torch.cuda.empty_cache() |
|
|
| def get_output_embeddings(self): |
| return self.lm_head |
|
|
| def set_output_embeddings(self, new_embeddings): |
| self.lm_head = new_embeddings |
|
|
| @auto_docstring |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| encoder_hidden_states: Optional[torch.Tensor] = None, |
| encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| **kwargs, |
| ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: |
| r""" |
| input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): |
| `input_ids_length` = `sequence_length` if `past_key_values` is `None` else |
| `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input |
| sequence tokens in the vocabulary. |
| |
| If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as |
| `input_ids`. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): |
| Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set |
| `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` |
| are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` |
| """ |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| transformer_outputs = self.transformer( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| cache_position=cache_position, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| encoder_hidden_states=encoder_hidden_states, |
| encoder_attention_mask=encoder_attention_mask, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| hidden_states = transformer_outputs[0] |
|
|
|         # Set device for model parallelism |
| if self.model_parallel: |
| torch.cuda.set_device(self.transformer.first_device) |
| hidden_states = hidden_states.to(self.lm_head.weight.device) |
|
|
| lm_logits = self.lm_head(hidden_states) |
|
|
| loss = None |
| if labels is not None: |
|             # Flatten the tokens |
| loss = self.loss_function( |
| lm_logits, |
| labels, |
| vocab_size=self.config.vocab_size, |
| **kwargs, |
| ) |
|
|
| if not return_dict: |
| output = (lm_logits,) + transformer_outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return CausalLMOutputWithCrossAttentions( |
| loss=loss, |
| logits=lm_logits, |
| past_key_values=transformer_outputs.past_key_values, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| cross_attentions=transformer_outputs.cross_attentions, |
| ) |
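| |
| |
| # Hedged usage sketch (requires network access to the Hugging Face Hub; the |
| # checkpoint name is the standard public one, everything else is an example): |
| def _demo_gpt2_lm_head_generation(): |
|     from transformers import AutoTokenizer |
| |
|     tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") |
|     model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2") |
|     inputs = tokenizer("Hello, my dog is", return_tensors="pt") |
|     generated = model.generate(**inputs, max_new_tokens=5) |
|     return tokenizer.decode(generated[0], skip_special_tokens=True) |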
|
|
|
|
| @auto_docstring( |
| custom_intro=""" |
|     The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top, e.g. for |
|     RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the |
|     input embeddings; the classification head takes as input the hidden state of a specified classification token in |
|     the input sequence. |
| """ |
| ) |
| class GPT2DoubleHeadsModel(GPT2PreTrainedModel, GenerationMixin): |
| _tied_weights_keys = ["lm_head.weight"] |
|
|
| def __init__(self, config): |
| super().__init__(config) |
| config.num_labels = 1 |
| self.transformer = GPT2Model(config) |
| self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) |
| self.multiple_choice_head = GPT2SequenceSummary(config) |
|
|
|         # Model parallel |
| self.model_parallel = False |
| self.device_map = None |
|
|
|         # Initialize weights and apply final processing |
| self.post_init() |
|
|
| @add_start_docstrings(PARALLELIZE_DOCSTRING) |
| def parallelize(self, device_map=None): |
| warnings.warn( |
| "`GPT2DoubleHeadsModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should" |
| " load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your" |
| " own `device_map` but it needs to be a dictionary module_name to device, so for instance" |
| " {'transformer.h.0': 0, 'transformer.h.1': 1, ...}", |
| FutureWarning, |
| ) |
| self.device_map = ( |
| get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) |
| if device_map is None |
| else device_map |
| ) |
| assert_device_map(self.device_map, len(self.transformer.h)) |
| self.transformer.parallelize(self.device_map) |
| self.lm_head = self.lm_head.to(self.transformer.first_device) |
| self.multiple_choice_head = self.multiple_choice_head.to( |
| self.transformer.first_device |
| ) |
| self.model_parallel = True |
|
|
| @add_start_docstrings(DEPARALLELIZE_DOCSTRING) |
| def deparallelize(self): |
| warnings.warn( |
| "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", |
| FutureWarning, |
| ) |
| self.transformer.deparallelize() |
| self.transformer = self.transformer.to("cpu") |
| self.lm_head = self.lm_head.to("cpu") |
| self.multiple_choice_head = self.multiple_choice_head.to("cpu") |
| self.model_parallel = False |
| torch.cuda.empty_cache() |
|
|
| def get_output_embeddings(self): |
| return self.lm_head |
|
|
| def set_output_embeddings(self, new_embeddings): |
| self.lm_head = new_embeddings |
|
|
| @auto_docstring |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| mc_token_ids: Optional[torch.LongTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| mc_labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| **kwargs, |
| ) -> Union[Tuple, GPT2DoubleHeadsModelOutput]: |
| r""" |
| input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): |
| `input_ids_length` = `sequence_length` if `past_key_values` is `None` else |
| `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input |
| sequence tokens in the vocabulary. |
| |
| If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as |
| `input_ids`. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): |
| Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - |
| 1]`. |
| labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): |
| Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set |
| `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to |
| `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]` |
| mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*): |
|             Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]` |
| where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above) |
| |
| Example: |
| |
| ```python |
| >>> import torch |
| >>> from transformers import AutoTokenizer, GPT2DoubleHeadsModel |
| |
| >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") |
| >>> model = GPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2") |
| |
| >>> # Add a [CLS] to the vocabulary (we should train it also!) |
| >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"}) |
| >>> # Update the model embeddings with the new vocabulary size |
| >>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) |
| |
| >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] |
| >>> encoded_choices = [tokenizer.encode(s) for s in choices] |
| >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] |
| |
| >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2 |
| >>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1 |
| |
| >>> outputs = model(input_ids, mc_token_ids=mc_token_ids) |
| >>> lm_logits = outputs.logits |
| >>> mc_logits = outputs.mc_logits |
| ```""" |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| transformer_outputs = self.transformer( |
| input_ids, |
| past_key_values=past_key_values, |
| cache_position=cache_position, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| hidden_states = transformer_outputs[0] |
|
|
|         # Set device for model parallelism |
| if self.model_parallel: |
| torch.cuda.set_device(self.transformer.first_device) |
| hidden_states = hidden_states.to(self.lm_head.weight.device) |
|
|
| lm_logits = self.lm_head(hidden_states) |
| mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1) |
|
|
| mc_loss = None |
| if mc_labels is not None: |
| loss_fct = CrossEntropyLoss() |
| mc_loss = loss_fct( |
| mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1) |
| ) |
| lm_loss = None |
| if labels is not None: |
| labels = labels.to(lm_logits.device) |
| shift_logits = lm_logits[..., :-1, :].contiguous() |
| shift_labels = labels[..., 1:].contiguous() |
| loss_fct = CrossEntropyLoss() |
| lm_loss = loss_fct( |
| shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1) |
| ) |
|
|
| if not return_dict: |
| output = (lm_logits, mc_logits) + transformer_outputs[1:] |
| if mc_loss is not None: |
| output = (mc_loss,) + output |
| return ((lm_loss,) + output) if lm_loss is not None else output |
|
|
| return GPT2DoubleHeadsModelOutput( |
| loss=lm_loss, |
| mc_loss=mc_loss, |
| logits=lm_logits, |
| mc_logits=mc_logits, |
| past_key_values=transformer_outputs.past_key_values, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| ) |
|
|
| @staticmethod |
| def _reorder_cache( |
| past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor |
| ) -> Tuple[Tuple[torch.Tensor]]: |
| """ |
| This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or |
| [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct |
| beam_idx at every generation step. |
| """ |
| return tuple( |
| tuple( |
| past_state.index_select(0, beam_idx.to(past_state.device)) |
| for past_state in layer_past |
| ) |
| for layer_past in past_key_values |
| ) |
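| |
| |
| # Worked example (toy tensors assumed) of the `index_select` reorder above: |
| # beam search permutes the batch dimension of every cached key/value state. |
| def _demo_reorder_cache(): |
|     layer_key = torch.tensor([[1.0], [2.0]])  # (batch=2, ...) per-layer cache entry |
|     past_key_values = ((layer_key,),) |
|     beam_idx = torch.tensor([1, 0])  # swap the two beams |
|     reordered = GPT2DoubleHeadsModel._reorder_cache(past_key_values, beam_idx) |
|     assert torch.equal(reordered[0][0], torch.tensor([[2.0], [1.0]])) |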
|
|
|
|
| @auto_docstring( |
| custom_intro=""" |
| The GPT2 Model transformer with a sequence classification head on top (linear layer). |
| |
| [`GPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models |
| (e.g. GPT-1) do. |
| |
|     Since it does classification on the last token, it needs to know the position of the last token. If a |
|     `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If |
|     no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the |
|     padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in |
|     each row of the batch). |
| """ |
| ) |
| class GPT2ForSequenceClassification(GPT2PreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
| self.transformer = GPT2Model(config) |
| self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) |
|
|
|         # Model parallel |
| self.model_parallel = False |
| self.device_map = None |
|
|
|         # Initialize weights and apply final processing |
| self.post_init() |
|
|
| @auto_docstring |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, SequenceClassifierOutputWithPast]: |
| r""" |
| input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): |
| `input_ids_length` = `sequence_length` if `past_key_values` is `None` else |
| `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input |
| sequence tokens in the vocabulary. |
| |
| If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as |
| `input_ids`. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., |
| config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If |
| `config.num_labels > 1` a classification loss is computed (Cross-Entropy). |
| """ |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| transformer_outputs = self.transformer( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| hidden_states = transformer_outputs[0] |
| logits = self.score(hidden_states) |
|
|
| if input_ids is not None: |
| batch_size, sequence_length = input_ids.shape[:2] |
| else: |
| batch_size, sequence_length = inputs_embeds.shape[:2] |
|
|
| if self.config.pad_token_id is None and batch_size != 1: |
| raise ValueError( |
| "Cannot handle batch sizes > 1 if no padding token is defined." |
| ) |
| if self.config.pad_token_id is None: |
| last_non_pad_token = -1 |
| elif input_ids is not None: |
| # To handle both left- and right-padding, take the rightmost token in each row that is not the pad token |
| non_pad_mask = (input_ids != self.config.pad_token_id).to( |
| logits.device, torch.int32 |
| ) |
| token_indices = torch.arange( |
| input_ids.shape[-1], device=logits.device, dtype=torch.int32 |
| ) |
| last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) |
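| # e.g. with pad_token_id = 50256 and input_ids = [[15496, 995, 50256, 50256]]: |
| # non_pad_mask = [[1, 1, 0, 0]], token_indices = [0, 1, 2, 3], so |
| # last_non_pad_token = [1], the rightmost non-padding position in each row |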
| else: |
| last_non_pad_token = -1 |
| logger.warning_once( |
| f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " |
| "unexpected if using padding tokens in conjunction with `inputs_embeds.`" |
| ) |
|
|
| pooled_logits = logits[ |
| torch.arange(batch_size, device=logits.device), last_non_pad_token |
| ] |
|
|
| loss = None |
| if labels is not None: |
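| # Infer the problem type once from `num_labels` and the label dtype, and cache it on the |
| # config so that subsequent calls reuse the same loss function |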
| if self.config.problem_type is None: |
| if self.num_labels == 1: |
| self.config.problem_type = "regression" |
| elif self.num_labels > 1 and ( |
| labels.dtype == torch.long or labels.dtype == torch.int |
| ): |
| self.config.problem_type = "single_label_classification" |
| else: |
| self.config.problem_type = "multi_label_classification" |
|
|
| if self.config.problem_type == "regression": |
| loss_fct = MSELoss() |
| if self.num_labels == 1: |
| loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) |
| else: |
| loss = loss_fct(pooled_logits, labels) |
| elif self.config.problem_type == "single_label_classification": |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct( |
| pooled_logits.view(-1, self.num_labels), labels.view(-1) |
| ) |
| elif self.config.problem_type == "multi_label_classification": |
| loss_fct = BCEWithLogitsLoss() |
| loss = loss_fct(pooled_logits, labels) |
| if not return_dict: |
| output = (pooled_logits,) + transformer_outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return SequenceClassifierOutputWithPast( |
| loss=loss, |
| logits=pooled_logits, |
| past_key_values=transformer_outputs.past_key_values, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| ) |
|
|
|
|
| @auto_docstring |
| class GPT2ForTokenClassification(GPT2PreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
|
|
| self.transformer = GPT2Model(config) |
| if ( |
| hasattr(config, "classifier_dropout") |
| and config.classifier_dropout is not None |
| ): |
| classifier_dropout = config.classifier_dropout |
| elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None: |
| classifier_dropout = config.hidden_dropout |
| else: |
| classifier_dropout = 0.1 |
| self.dropout = nn.Dropout(classifier_dropout) |
| self.classifier = nn.Linear(config.hidden_size, config.num_labels) |
|
|
| # Model parallel |
| self.model_parallel = False |
| self.device_map = None |
|
|
| # Initialize weights and apply final processing |
| self.post_init() |
|
|
| @auto_docstring |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, TokenClassifierOutput]: |
| r""" |
| input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): |
| `input_ids_length` = `sequence_length` if `past_key_values` is `None` else |
| `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input |
| sequence tokens in the vocabulary. |
| |
| If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as |
| `input_ids`. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`; |
| the loss is a per-token Cross-Entropy. |
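| |
| Example (a minimal sketch: with the base `gpt2` checkpoint the token-classification head is randomly |
| initialized, so per-token predictions are only meaningful after fine-tuning): |
| |
| ```python |
| >>> import torch |
| >>> from transformers import AutoTokenizer, GPT2ForTokenClassification |
| |
| >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") |
| >>> model = GPT2ForTokenClassification.from_pretrained("gpt2") |
| |
| >>> inputs = tokenizer("HuggingFace is based in NYC", return_tensors="pt") |
| >>> with torch.no_grad(): |
| ...     logits = model(**inputs).logits  # shape: (batch_size, sequence_length, num_labels) |
| |
| >>> predicted_token_class_ids = logits.argmax(-1) |
| ``` |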
| """ |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| transformer_outputs = self.transformer( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| hidden_states = transformer_outputs[0] |
| hidden_states = self.dropout(hidden_states) |
| logits = self.classifier(hidden_states) |
|
|
| loss = None |
| if labels is not None: |
| labels = labels.to(logits.device) |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
|
|
| if not return_dict: |
| output = (logits,) + transformer_outputs[2:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return TokenClassifierOutput( |
| loss=loss, |
| logits=logits, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| ) |
|
|
|
|
| @auto_docstring |
| class GPT2ForQuestionAnswering(GPT2PreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
| self.transformer = GPT2Model(config) |
| self.qa_outputs = nn.Linear(config.hidden_size, 2) |
|
|
| # Model parallel |
| self.model_parallel = False |
| self.device_map = None |
|
|
| # Initialize weights and apply final processing |
| self.post_init() |
|
|
| @auto_docstring |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| start_positions: Optional[torch.LongTensor] = None, |
| end_positions: Optional[torch.LongTensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, QuestionAnsweringModelOutput]: |
| r""" |
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| Indices of input sequence tokens in the vocabulary. This model's forward does not accept |
| `past_key_values`, so the full sequence must be passed on every call. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
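| start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for the position (index) of the start of the labelled span, used to compute the token classification |
| loss. Positions are clamped to the length of the sequence; positions outside the sequence are not taken |
| into account for computing the loss. |
| end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for the position (index) of the end of the labelled span, used to compute the token classification |
| loss. Positions are clamped to the length of the sequence; positions outside the sequence are not taken |
| into account for computing the loss. |
| |
| Example (a minimal sketch: with the base `gpt2` checkpoint the QA head is randomly initialized, so the |
| extracted span is not meaningful until the model has been fine-tuned for extractive QA): |
| |
| ```python |
| >>> import torch |
| >>> from transformers import AutoTokenizer, GPT2ForQuestionAnswering |
| |
| >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") |
| >>> model = GPT2ForQuestionAnswering.from_pretrained("gpt2") |
| |
| >>> question, context = "Who released GPT-2?", "GPT-2 was released by OpenAI in 2019." |
| >>> inputs = tokenizer(question, context, return_tensors="pt") |
| >>> with torch.no_grad(): |
| ...     outputs = model(**inputs) |
| |
| >>> start = int(outputs.start_logits.argmax(-1)) |
| >>> end = int(outputs.end_logits.argmax(-1)) |
| >>> answer = tokenizer.decode(inputs.input_ids[0, start : end + 1]) |
| ``` |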
| """ |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| outputs = self.transformer( |
| input_ids, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| sequence_output = outputs[0] |
|
|
| logits = self.qa_outputs(sequence_output) |
| start_logits, end_logits = logits.split(1, dim=-1) |
| start_logits = start_logits.squeeze(-1).contiguous() |
| end_logits = end_logits.squeeze(-1).contiguous() |
|
|
| total_loss = None |
| if start_positions is not None and end_positions is not None: |
| # If the positions carry an extra dimension (e.g. after a multi-GPU gather), squeeze it and move to the logits device |
| if len(start_positions.size()) > 1: |
| start_positions = start_positions.squeeze(-1).to(start_logits.device) |
| if len(end_positions.size()) > 1: |
| end_positions = end_positions.squeeze(-1).to(end_logits.device) |
| # Positions outside the model inputs are clamped to `ignored_index` and then ignored by the loss below |
| ignored_index = start_logits.size(1) |
| start_positions = start_positions.clamp(0, ignored_index) |
| end_positions = end_positions.clamp(0, ignored_index) |
|
|
| loss_fct = CrossEntropyLoss(ignore_index=ignored_index) |
| start_loss = loss_fct(start_logits, start_positions) |
| end_loss = loss_fct(end_logits, end_positions) |
| total_loss = (start_loss + end_loss) / 2 |
|
|
| if not return_dict: |
| output = (start_logits, end_logits) + outputs[2:] |
| return ((total_loss,) + output) if total_loss is not None else output |
|
|
| return QuestionAnsweringModelOutput( |
| loss=total_loss, |
| start_logits=start_logits, |
| end_logits=end_logits, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|
|
|
| __all__ = [ |
| "GPT2DoubleHeadsModel", |
| "GPT2ForQuestionAnswering", |
| "GPT2ForSequenceClassification", |
| "GPT2ForTokenClassification", |
| "GPT2LMHeadModel", |
| "GPT2Model", |
| "GPT2PreTrainedModel", |
| "load_tf_weights_in_gpt2", |
| ] |
|
|