from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.models.llama.modeling_llama import (
    LlamaDynamicNTKScalingRotaryEmbedding,
    LlamaLinearScalingRotaryEmbedding,
    LlamaRMSNorm,
    LlamaRotaryEmbedding,
)
from transformers.utils import logging


logger = logging.get_logger(__name__)


class StageLinear(nn.Module):
    """Linear projection that, for stage != 0, is replaced by a narrow transformer stack.

    stage == 0 uses a plain nn.Linear. Otherwise the input is projected down to
    ``config.stage_0_hidden_size``, run through a few TransformerBlocks, normalized,
    and projected back out to ``out_features``.
    """

    def __init__(self, in_features=768, out_features=768, bias=True, stage=0, config=None):
        super().__init__()
        self.stage = stage
        if self.stage == 0:
            self.module = nn.Linear(in_features, out_features, bias=bias)
        else:
            # Build a narrow "stage 0" copy of the main config for the inner blocks.
            transformer_config = TransformerConfig()
            transformer_config.__dict__.update(config.__dict__)
            transformer_config.__dict__.update({"hidden_size": config.stage_0_hidden_size})
            transformer_config.__dict__.update({"num_hidden_layers": config.stage_0_hidden_layers})
            transformer_config.__dict__.update({"num_attention_heads": config.stage_0_attention_heads})
            transformer_config.__dict__.update({"intermediate_size": config.stage_0_hidden_size * 4})
            self.in_proj = nn.Linear(in_features, config.stage_0_hidden_size, bias=bias)
            self.h = nn.ModuleList(
                [TransformerBlock(transformer_config) for _ in range(transformer_config.num_hidden_layers)]
            )
            self.ln_f = LlamaRMSNorm(config.stage_0_hidden_size, eps=config.layer_norm_epsilon)
            self.out_proj = nn.Linear(config.stage_0_hidden_size, out_features, bias=bias)

    def forward(self, x):
        if self.stage == 0:
            return self.module(x)

        x = self.in_proj(x)
        # The inner blocks apply rotary embeddings, so they need explicit position ids.
        position_ids = torch.arange(x.size(1), device=x.device).unsqueeze(0)
        for block in self.h:
            x_new, _ = block(x, position_ids=position_ids, use_cache=False)
            x = x + x_new
        # Final RMSNorm before projecting back to the requested output width.
        x = self.ln_f(x)
        x = self.out_proj(x)
        return x


class TransformerConfig(PretrainedConfig):
    model_type = "Transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "max_position_embeddings": "max_position_embeddings",
        "num_attention_heads": "num_attention_heads",
        "num_hidden_layers": "num_hidden_layers",
    }

    def __init__(
        self,
        vocab_size=32000,
        max_position_embeddings=2048,
        expanded_lm_head_size=8192,
        hidden_size=768,
        stage_0_hidden_size=256,
        stage_0_hidden_layers=1,
        stage_0_attention_heads=8,
        kv_hidden_size=None,
        num_hidden_layers=10,
        num_attention_heads=12,
        intermediate_size=None,
        activation_function="silu",
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        combined_qkv=True,
        use_bias=False,
        rope_scaling=None,
        rope_theta=10000,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.stage_0_hidden_size = stage_0_hidden_size
        self.stage_0_hidden_layers = stage_0_hidden_layers
        self.stage_0_attention_heads = stage_0_attention_heads
        self.expanded_lm_head_size = expanded_lm_head_size
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.kv_hidden_size = kv_hidden_size
        self.use_bias = use_bias
        self.combined_qkv = combined_qkv
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = (
            intermediate_size if intermediate_size is not None else hidden_size * 4
        )
        self.activation_function = activation_function
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        # Forward tie_word_embeddings to PretrainedConfig.__init__, whose own default
        # (True) would otherwise take precedence over the value set here.
        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies rotary position embeddings to q and k.

    ``cos`` and ``sin`` are precomputed tables indexed by absolute position; they are
    gathered with ``position_ids`` and broadcast over the head dimension.
    """
    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class TransformerAttention(nn.Module):
    def __init__(self, config, stage):
        super().__init__()
        self.config = config
        self.stage = stage
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.head_dim = config.hidden_size // config.num_attention_heads
        assert (
            self.head_dim * config.num_attention_heads == config.hidden_size
        ), "hidden_size must be divisible by num_attention_heads"
        self.use_bias = config.use_bias

        # Separate q/k/v projections are needed when the key/value stream has its own
        # width (kv_hidden_size) or when combined_qkv is disabled.
        if not config.combined_qkv or config.kv_hidden_size is not None:
            self.query = StageLinear(
                config.hidden_size, config.hidden_size, bias=self.use_bias, stage=stage, config=config
            )
            self.key = StageLinear(
                config.kv_hidden_size if config.kv_hidden_size else config.hidden_size,
                config.hidden_size,
                bias=self.use_bias,
                stage=stage,
                config=config,
            )
            self.value = StageLinear(
                config.kv_hidden_size if config.kv_hidden_size else config.hidden_size,
                config.hidden_size,
                bias=self.use_bias,
                stage=stage,
                config=config,
            )
        else:
            self.qkv = StageLinear(
                config.hidden_size, config.hidden_size * 3, bias=self.use_bias, stage=stage, config=config
            )
        self.out = StageLinear(config.hidden_size, config.hidden_size, bias=self.use_bias, stage=stage, config=config)
        self._init_rope()

    def _init_rope(self):
        if self.config.rope_scaling is None:
            self.rotary_emb = LlamaRotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.config.max_position_embeddings,
                base=self.config.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling["type"]
            scaling_factor = self.config.rope_scaling["factor"]
            if scaling_type == "linear":
                self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.config.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.config.rope_theta,
                )
            elif scaling_type == "dynamic":
                self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.config.rope_theta,
                )
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

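    # Configuration note: ``rope_scaling`` is expected to be a dict with the keys
    # "type" ("linear" or "dynamic") and "factor", e.g. {"type": "linear", "factor": 2.0}
    # (the factor value is only illustrative); any other type raises a ValueError above.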

    def forward(self, x0, x1=None, causal=False, mask=None, position_ids=None, use_cache=True, layer_past=None):
        batch_size = x0.size(0)

        def split_heads(x):
            return x.view(
                batch_size, -1, self.config.num_attention_heads, self.head_dim
            ).transpose(1, 2)

        # Mirror the projection layout chosen in __init__.
        if not self.config.combined_qkv or self.config.kv_hidden_size is not None:
            q = split_heads(self.query(x0))
            k = split_heads(self.key(x1 if x1 is not None else x0))
            v = split_heads(self.value(x1 if x1 is not None else x0))
        else:
            q, k, v = self.qkv(x0).chunk(3, dim=-1)
            q = split_heads(q)
            k = split_heads(k)
            v = split_heads(v)

        # Rotary embeddings are applied to the new positions only; cached keys were
        # already rotated when they were stored, so they are concatenated afterwards.
        # The rotary modules are assumed to follow the seq_len-based API that matches
        # the custom apply_rotary_pos_emb above (cos/sin tables indexed by position_ids).
        kv_seq_len = k.shape[-2]
        if layer_past is not None:
            kv_seq_len += layer_past[0].shape[-2]
        cos, sin = self.rotary_emb(v, seq_len=kv_seq_len)
        q, k = apply_rotary_pos_emb(q, k, cos, sin, position_ids)

        if layer_past is not None:
            past_key, past_value = layer_past
            k = torch.cat((past_key, k), dim=-2)
            v = torch.cat((past_value, v), dim=-2)

        present = (k, v) if use_cache else None

        # SDPA's is_causal flag assumes query and key have the same length; during cached
        # decoding the new query must attend to every cached position, so fall back to the
        # (optional) additive mask instead.
        is_causal = causal and q.shape[-2] == k.shape[-2]
        attn_output = F.scaled_dot_product_attention(
            q, k, v, attn_mask=None if is_causal else mask, dropout_p=0.0, is_causal=is_causal
        )
        attn_output = (
            attn_output.transpose(1, 2)
            .contiguous()
            .view(batch_size, -1, self.config.hidden_size)
        )
        return self.out(attn_output), present


class MLP(nn.Module):
    def __init__(self, config, stage=0):
        super().__init__()
        self.config = config
        self.stage = stage
        self.gate_proj = StageLinear(
            config.hidden_size, config.intermediate_size, bias=False, stage=stage, config=config
        )
        self.up_proj = StageLinear(
            config.hidden_size, config.intermediate_size, bias=False, stage=stage, config=config
        )
        self.down_proj = StageLinear(
            config.intermediate_size, config.hidden_size, bias=False, stage=stage, config=config
        )
        self.act_fn = ACT2FN[config.activation_function]

    def forward(self, x):
        # SwiGLU-style gating: act(gate(x)) * up(x), projected back down.
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


class TransformerBlock(nn.Module):
    def __init__(self, config, stage=0):
        super().__init__()
        self.config = config
        self.stage = stage
        self.attn = TransformerAttention(config, stage)
        self.ffn = MLP(config, stage)
        self.ln1 = LlamaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.ln2 = LlamaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)

    def forward(self, x, mask=None, position_ids=None, use_cache=True, layer_past=None):
        # Parallel-residual block: attention and MLP both read the (separately
        # normalized) input, and their outputs are added to the same residual stream.
        attn_in = self.ln1(x)
        ffn_in = self.ln2(x)
        attn_out, present = self.attn(
            attn_in, causal=True, mask=mask, position_ids=position_ids, use_cache=use_cache, layer_past=layer_past
        )
        ffn_out = self.ffn(ffn_in)
        x = x + attn_out + ffn_out
        if not use_cache:
            present = None
        return (x, present)


class TransformerPreTrainedModel(PreTrainedModel):
    config_class = TransformerConfig
    base_model_prefix = "transformer"
    is_parallelizable = False
    supports_gradient_checkpointing = True
    _no_split_modules = ["TransformerBlock"]
    _skip_keys_device_placement = "past_key_values"

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


class TransformerModel(TransformerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
        self.h = nn.ModuleList(
            [TransformerBlock(config, stage=1) for _ in range(config.num_hidden_layers)]
        )
        self.ln_f = LlamaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time"
            )
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(
                past_length,
                input_shape[-1] + past_length,
                dtype=torch.long,
                device=device,
            )
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        # Convert the 2D padding mask into an additive mask broadcastable over heads.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            attention_mask = attention_mask[:, None, None, :]
            attention_mask = attention_mask.to(dtype=self.dtype)
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # Cross-attention inputs are accepted for API compatibility; this model has no
        # cross-attention layers, so the prepared mask is not consumed by the blocks.
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            (
                encoder_batch_size,
                encoder_sequence_length,
                _,
            ) = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None

        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        hidden_states = inputs_embeds

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        output_shape = (-1,) + (hidden_states.shape[1],) + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = (
            () if output_attentions and self.config.add_cross_attention else None
        )
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                if layer_past is not None:
                    layer_past = tuple(
                        past_state.to(hidden_states.device)
                        for past_state in layer_past
                    )
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            outputs = block(
                hidden_states,
                mask=attention_mask,
                position_ids=position_ids,
                use_cache=use_cache,
                layer_past=layer_past,
            )
            hidden_states = outputs[0]
            if use_cache:
                presents = presents + (outputs[1],)

        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(output_shape)
        # Add the final hidden state after the last block.
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=None,
            cross_attentions=None,
        )


class TransformerModelForCausalLM(TransformerPreTrainedModel):
    # lm_head is kept separate from the input embeddings (tie_word_embeddings defaults
    # to False in TransformerConfig), so no tied-weight keys are declared.
    _tied_weights_keys = []

    def __init__(self, config):
        super().__init__(config)
        self.transformer = TransformerModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
        self.model_parallel = False
        self.device_map = None
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
    ):
        token_type_ids = kwargs.get("token_type_ids", None)
        # Only the last token has to be processed once a cache is available.
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # Create position_ids on the fly for batch generation.
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None

        # `inputs_embeds` can only be used for the first forward pass; afterwards
        # generation continues from `input_ids`.
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "position_ids": position_ids,
                "attention_mask": attention_mask,
                "token_type_ids": token_type_ids,
            }
        )
        return model_inputs

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to
            `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        # Under model parallelism, compute the logits on the lm_head's device.
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Move labels to the logits' device and shift so that tokens < n predict n.
            labels = labels.to(lm_logits.device)
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens before computing cross entropy.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
            )

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            cross_attentions=transformer_outputs.cross_attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(
                past_state.index_select(0, beam_idx.to(past_state.device))
                for past_state in layer_past
            )
            for layer_past in past_key_values
        )
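

if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the library API): build a deliberately tiny
    # config and run one forward pass with labels. The sizes below are illustrative
    # assumptions rather than recommended settings, and the script assumes a transformers
    # version that still exposes the Llama rotary-embedding classes imported at the top
    # of this file.
    config = TransformerConfig(
        vocab_size=128,
        hidden_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        max_position_embeddings=64,
        stage_0_hidden_size=32,
        stage_0_hidden_layers=1,
        stage_0_attention_heads=4,
    )
    model = TransformerModelForCausalLM(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    outputs = model(input_ids=input_ids, labels=input_ids)
    # Expected: a scalar loss and logits of shape (batch, seq_len, vocab) = (2, 16, 128).
    print(outputs.loss.item(), tuple(outputs.logits.shape))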