| """ PyTorch StableLM-Alpha model. """ |
| from typing import Optional, Tuple, Union |
| import math |
|
|
| import torch |
| import torch.utils.checkpoint |
| from torch import nn |
| from torch.nn import CrossEntropyLoss |
| from transformers.modeling_outputs import ( |
| BaseModelOutputWithPast, |
| CausalLMOutputWithPast, |
| ) |
| from transformers.modeling_utils import PreTrainedModel |
| from transformers.utils import logging |
|
|
| from .configuration_stablelm_alpha import StableLMAlphaConfig |
|
|
|
|
| logger = logging.get_logger(__name__) |
|
|
|
|
| def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): |
| """Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, tgt_seq_len, src_seq_len]`.""" |
| batch_size, src_len = mask.size() |
| tgt_len = tgt_len if tgt_len is not None else src_len |
|
|
| expanded_mask = mask[:, None, None, :].expand(batch_size, 1, tgt_len, src_len).to(dtype) |
| inverted_mask = 1.0 - expanded_mask |
|
|
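    # Convert to an additive mask: 0 where attention is allowed, the dtype's most negative value where masked.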
| return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) |
|
|
|
|
| class LayerNorm(nn.LayerNorm): |
| def __init__(self, normalized_shape: torch.Size, bias: bool = True, **kwargs): |
| r""" |
| bias (`bool`, default = True): whether to use the bias term. |
| """ |
| super().__init__(normalized_shape, **kwargs) |
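        # Assigning None over the registered parameter removes the bias from the module's parameters/state dict.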
| if not bias: |
| self.bias = None |
|
|
|
|
| class DecoderLayer(nn.Module): |
| def __init__(self, config: StableLMAlphaConfig): |
| super().__init__() |
|
|
| self.norm = LayerNorm(config.hidden_size, eps=config.norm_eps) |
| self.attention = Attention(config) |
| self.mlp = MLP(config) |
|
|
| def forward( |
| self, |
| hidden_states: Optional[torch.FloatTensor], |
| attention_mask: Optional[torch.FloatTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| past_key_value: Optional[Tuple[torch.Tensor]] = None, |
| output_attentions: Optional[bool] = False, |
| use_cache: Optional[bool] = False, |
| ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: |
| residual = hidden_states |
|
|
        # Pre-norm: a single LayerNorm whose output feeds both the attention and MLP branches.
| hidden_states = self.norm(hidden_states) |
|
|
        # Self-attention branch (returns attention weights and the updated KV cache when requested).
| attn_output, attn_weights, present_key_value = self.attention( |
| hidden_states=hidden_states, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| past_key_value=past_key_value, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| ) |
|
|
        # Feed-forward branch, applied to the same normalized hidden states.
| mlp_output = self.mlp(hidden_states) |
|
|
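        # Parallel residual: the attention and MLP branches both read the same normalized input and their
        # outputs are summed with the residual (similar to GPT-NeoX-style parallel blocks).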
| hidden_states = residual + attn_output + mlp_output |
|
|
| outputs = (hidden_states,) |
| if output_attentions: |
| outputs += (attn_weights,) |
| if use_cache: |
| outputs += (present_key_value,) |
| return outputs |
|
|
|
|
| class MLP(nn.Module): |
| def __init__(self, config: StableLMAlphaConfig): |
| super().__init__() |
|
|
| hidden_size = config.hidden_size |
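        # SwiGLU-style feed-forward sizing: roughly 8/3 * hidden_size, rounded up to a multiple of 256.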
| multiple_of = 256 |
| ff_dim = int(8 * hidden_size / 3) |
| intermediate_size = multiple_of * ((ff_dim + multiple_of - 1) // multiple_of) |
|
|
| self.gate_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False) |
| self.out_proj = nn.Linear(intermediate_size, hidden_size, bias=False) |
| self.act = nn.SiLU() |
|
|
| def forward(self, x: torch.Tensor) -> torch.Tensor: |
| ff, ff_gate = self.gate_proj(x).chunk(2, dim=-1) |
| return self.out_proj(ff * self.act(ff_gate)) |
|
|
|
|
| class RotaryEmbedding(nn.Module): |
| def __init__( |
| self, |
| dim: int, |
| max_position_embeddings: int, |
| base: int = 10_000, |
| device: Optional[torch.device] = None, |
| ): |
| super().__init__() |
|
|
| self.dim = dim |
| self.max_position_embeddings = max_position_embeddings |
| self.base = base |
| inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim)) |
| self.register_buffer("inv_freq", inv_freq, persistent=False) |
|
|
        # Build the cos/sin cache up front; `forward` re-builds it only if a longer sequence is seen.
| self._set_cos_sin_cache( |
| seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() |
| ) |
|
|
| def _set_cos_sin_cache(self, seq_len: int, device: torch.device, dtype: torch.dtype): |
| self.max_seq_len_cached = seq_len |
| t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) |
| freqs = torch.outer(t, self.inv_freq) |
        # Duplicate the frequencies so both halves of the rotary dimensions use the same cos/sin values.
| emb = torch.cat((freqs, freqs), dim=-1) |
| self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False) |
| self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False) |
|
|
    def forward(self, x: torch.Tensor, seq_len: Optional[int] = None):
        # x: [batch_size, num_heads, seq_len, head_dim]
        if seq_len is None:
            seq_len = x.shape[-2]
        if seq_len > self.max_seq_len_cached:
| self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.get_default_dtype()) |
| return ( |
| self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), |
| self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), |
| ) |
|
|
|
|
| def rotate_half(x: torch.Tensor): |
| """Rotates half the hidden dims of the input.""" |
| x1, x2 = torch.chunk(x, 2, dim=-1) |
| return torch.cat((-x2, x1), dim=-1) |
|
|
|
|
| def apply_rotary_pos_emb(q, k, cos, sin, position_ids): |
    # The first two dimensions of cos and sin are always 1, so squeeze them down to [seq_len, dim].
| cos = cos.squeeze(1).squeeze(0) |
| sin = sin.squeeze(1).squeeze(0) |
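    # Gather cos/sin rows for the requested positions: [batch_size, 1, seq_len, rotary_ndims], broadcast over heads.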
| cos = cos[position_ids].unsqueeze(1) |
| sin = sin[position_ids].unsqueeze(1) |
| q_embed = (q * cos) + (rotate_half(q) * sin) |
| k_embed = (k * cos) + (rotate_half(k) * sin) |
| return q_embed, k_embed |
|
|
|
|
| class Attention(nn.Module): |
| def __init__(self, config: StableLMAlphaConfig): |
| super().__init__() |
|
|
| self.config = config |
| self.hidden_size = config.hidden_size |
| self.num_heads = config.num_heads |
| self.head_dim = self.hidden_size // self.num_heads |
| self.max_position_embeddings = config.max_position_embeddings |
        if self.hidden_size % self.num_heads != 0:
            raise ValueError(
                f"`hidden_size` ({self.hidden_size}) must be divisible by the number of attention heads ({self.num_heads})."
            )
|
|
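        # A single fused projection computes query, key and value in one matmul; it is split per head in `forward`.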
| self.qkv_proj = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) |
| self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False) |
| self._init_rope() |
|
|
| def _init_rope(self): |
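        # Only a `rotary_pct` fraction of each head's dimensions receives rotary embeddings; the rest pass through unrotated.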
| self.rotary_ndims = int(self.head_dim * self.config.rotary_pct) |
| self.rotary_emb = RotaryEmbedding( |
| self.rotary_ndims, |
| max_position_embeddings=self.config.max_position_embeddings, |
| base=self.config.rotary_emb_base, |
| ) |
|
|
| def forward( |
| self, |
| hidden_states: torch.FloatTensor, |
| attention_mask: torch.FloatTensor, |
| position_ids: torch.LongTensor, |
| past_key_value: Optional[Tuple[torch.Tensor]] = None, |
| output_attentions: Optional[bool] = False, |
| use_cache: Optional[bool] = False, |
| ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: |
| has_past_key_value = past_key_value is not None |
|
|
        # Fused QKV projection:
        # [batch_size, seq_len, hidden_size] -> [batch_size, seq_len, num_heads * 3 * head_dim]
| qkv = self.qkv_proj(hidden_states) |
|
|
        # [batch_size, seq_len, num_heads * 3 * head_dim] -> [batch_size, seq_len, num_heads, 3 * head_dim]
| new_qkv_shape = qkv.size()[:-1] + (self.num_heads, 3 * self.head_dim) |
| qkv = qkv.view(*new_qkv_shape) |
|
|
        # Split into query/key/value and move heads forward: 3 x [batch_size, num_heads, seq_len, head_dim]
| query = qkv[..., : self.head_dim].permute(0, 2, 1, 3) |
| key = qkv[..., self.head_dim:(2 * self.head_dim)].permute(0, 2, 1, 3) |
| value = qkv[..., (2 * self.head_dim):].permute(0, 2, 1, 3) |
|
|
        # Rotary embeddings are applied only to the first `rotary_ndims` dimensions of each head.
| query_rot = query[..., :self.rotary_ndims] |
| query_pass = query[..., self.rotary_ndims:] |
| key_rot = key[..., :self.rotary_ndims] |
| key_pass = key[..., self.rotary_ndims:] |
|
|
        # Total key/value length, including any cached positions.
| kv_seq_len = key.shape[-2] |
| if has_past_key_value: |
| kv_seq_len += past_key_value[0].shape[-2] |
|
|
        # Rotate the rotary slices of query and key with position-dependent cos/sin.
| cos, sin = self.rotary_emb(value, seq_len=kv_seq_len) |
| query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids) |
|
|
        # Re-attach the pass-through (non-rotary) dimensions.
| query = torch.cat((query, query_pass), dim=-1) |
| key = torch.cat((key, key_pass), dim=-1) |
|
|
        # Prepend cached keys/values from earlier decoding steps and update the cache.
| if has_past_key_value: |
| key = torch.cat((past_key_value[0], key), dim=2) |
| value = torch.cat((past_key_value[1], value), dim=2) |
| present_key_value = (key, value) if use_cache else None |
|
|
        # [batch_size, num_heads, seq_len, head_dim] -> [batch_size, seq_len, num_heads, head_dim] for the einsums below.
| query = query.transpose(1, 2).contiguous() |
| key = key.transpose(1, 2).contiguous() |
| value = value.transpose(1, 2).contiguous() |
|
|
        # Scaled dot-product attention scores: [batch_size, num_heads, tgt_len, src_len].
| softmax_scale = 1 / math.sqrt(self.head_dim) |
| attn_scores = torch.einsum('bthd,bshd->bhts', query, key * softmax_scale) |
        # The mask is additive: 0 for visible positions, a large negative value for masked ones.
| if attention_mask is not None: |
| attn_scores = attn_scores + attention_mask |
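        # Softmax in float32 for numerical stability, then cast back to the query dtype.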
| attn_weights = nn.functional.softmax(attn_scores, dim=-1, dtype=torch.float32).to(query.dtype) |
| attn_output = torch.einsum('bhts,bshd->bthd', attn_weights, value) |
|
|
        # Merge heads: [batch_size, seq_len, num_heads, head_dim] -> [batch_size, seq_len, hidden_size].
| attn_output = attn_output.reshape(attn_output.shape[0], attn_output.shape[1], -1) |
|
|
        # Final output projection.
| attn_output = self.out_proj(attn_output) |
|
|
| if not output_attentions: |
| attn_weights = None |
|
|
| return attn_output, attn_weights, present_key_value |
|
|
|
|
| def attention_mask_func(attention_scores: torch.Tensor, ltor_mask: torch.Tensor): |
| attention_scores.masked_fill_(~ltor_mask, torch.finfo(attention_scores.dtype).min) |
| return attention_scores |
|
|
|
|
| class StableLMAlphaPreTrainedModel(PreTrainedModel): |
| """An abstract class to handle weights initialization and a simple interface |
| for downloading and loading pretrained models. |
| """ |
|
|
| config_class = StableLMAlphaConfig |
| base_model_prefix = "transformer" |
| supports_gradient_checkpointing = True |
| _no_split_modules = ["DecoderLayer"] |
| _skip_keys_device_placement = "past_key_values" |
|
|
| def _init_weights(self, module: nn.Module): |
| """Initialize the weights""" |
| if isinstance(module, nn.Linear): |
| module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| if module.bias is not None: |
| module.bias.data.zero_() |
| elif isinstance(module, nn.Embedding): |
| module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| if module.padding_idx is not None: |
| module.weight.data[module.padding_idx].zero_() |
        elif isinstance(module, nn.LayerNorm):
            if module.bias is not None:
                module.bias.data.zero_()
            module.weight.data.fill_(1.0)
|
|
| def _set_gradient_checkpointing(self, module: nn.Module, value=False): |
| if isinstance(module, StableLMAlphaModel): |
| module.gradient_checkpointing = value |
|
|
|
|
| def _make_causal_mask( |
| input_ids_shape: torch.Size, |
| dtype: torch.dtype, |
| device: torch.device, |
| past_key_values_length: int = 0 |
| ): |
| """Make causal mask used for bi-directional self-attention.""" |
| batch_size, tgt_len = input_ids_shape |
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
| mask_cond = torch.arange(mask.size(-1), device=device) |
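    # Zero out the lower triangle (each position attends to itself and earlier positions); entries above the diagonal keep the large negative fill.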
| mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) |
| mask = mask.to(dtype) |
| if past_key_values_length > 0: |
| mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) |
| return mask[None, None, :, :].expand(batch_size, 1, tgt_len, tgt_len + past_key_values_length) |
|
|
|
|
| class StableLMAlphaModel(StableLMAlphaPreTrainedModel): |
| def __init__(self, config: StableLMAlphaConfig): |
| super().__init__(config) |
| self.config = config |
|
|
| self.embed = nn.Embedding(config.vocab_size, config.hidden_size) |
| self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.num_hidden_layers)]) |
| self.final_norm = LayerNorm(config.hidden_size, eps=config.norm_eps) |
|
|
| self.gradient_checkpointing = False |
| self.post_init() |
|
|
| def get_input_embeddings(self): |
| return self.embed |
|
|
| def set_input_embeddings(self, value: nn.Module): |
| self.embed = value |
|
|
    # Build the combined additive attention mask (causal + optional padding mask) used by the decoder layers.
| def _prepare_decoder_attention_mask( |
| self, |
| attention_mask: torch.Tensor, |
| input_shape: torch.Size, |
| inputs_embeds: torch.Tensor, |
| past_key_values_length: int, |
| ): |
        # Causal mask: [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len];
        # only needed when there is more than one query position.
| combined_attention_mask = None |
| if input_shape[-1] > 1: |
| combined_attention_mask = _make_causal_mask( |
| input_shape, |
| inputs_embeds.dtype, |
| device=inputs_embeds.device, |
| past_key_values_length=past_key_values_length, |
| ) |
|
|
| if attention_mask is not None: |
            # Expand the padding mask to [batch_size, 1, tgt_seq_len, src_seq_len] and add it to the causal mask.
| expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( |
| inputs_embeds.device |
| ) |
| combined_attention_mask = ( |
| expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask |
| ) |
|
|
| return combined_attention_mask |
|
|
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, BaseModelOutputWithPast]: |
| r""" |
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.num_hidden_layers`, with each tuple
            containing 2 tensors of shape `(batch_size, num_heads, sequence_length, head_dim)`):
            Contains precomputed key and value hidden states of the attention blocks, which can be used to
            speed up sequential decoding. If `past_key_values` are used, the user can optionally input only
            the last `input_ids` (those that don't have their past key value states given to this model) of
            shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`.
| use_cache (`bool`, *optional*): |
| If set to `True`, `past_key_values` key value states are returned and |
| can be used to speed up decoding (see `past_key_values`). |
| """ |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
|
|
| if input_ids is not None and inputs_embeds is not None: |
| raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") |
| elif input_ids is not None: |
| input_shape = input_ids.size() |
| elif inputs_embeds is not None: |
| input_shape = inputs_embeds.size()[:-1] |
| else: |
| raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
| batch_size, seq_length = input_shape |
|
|
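        # Work out how many positions are already cached so position ids and the mask cover cached + new tokens.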
| if past_key_values is None: |
| past_key_values_length = 0 |
| past_key_values = tuple([None] * self.config.num_hidden_layers) |
| seq_length_with_past = seq_length |
| else: |
| past_key_values_length = past_key_values[0][0].shape[2] |
| seq_length_with_past = seq_length + past_key_values_length |
|
|
| if position_ids is None: |
| device = input_ids.device if input_ids is not None else inputs_embeds.device |
| position_ids = torch.arange(past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device) |
| position_ids = position_ids.unsqueeze(0).view(-1, seq_length) |
| else: |
| position_ids = position_ids.view(-1, seq_length).long() |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.embed(input_ids) |
|
|
        # Default to attending over every position (including cached ones) when no mask is provided.
| if attention_mask is None: |
| attention_mask = torch.ones( |
| (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device |
| ) |
| attention_mask = self._prepare_decoder_attention_mask( |
| attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length |
| ) |
|
|
| hidden_states = inputs_embeds |
|
|
| if self.gradient_checkpointing and self.training: |
| if use_cache: |
| logger.warning( |
| "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." |
| ) |
| use_cache = False |
|
|
| all_hidden_states = () if output_hidden_states else None |
| all_attentions = () if output_attentions else None |
| present_key_values = () if use_cache else None |
|
|
        for decoder_layer, past_key_value in zip(self.layers, past_key_values):
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
| if self.gradient_checkpointing and self.training: |
|
|
| def create_custom_forward(module): |
| def custom_forward(*inputs): |
                        # Trailing args map to `output_attentions` and `use_cache=None`; past_key_value comes in via *inputs.
                        return module(*inputs, output_attentions, None)
|
|
| return custom_forward |
|
|
| outputs = torch.utils.checkpoint.checkpoint( |
| create_custom_forward(decoder_layer), |
| hidden_states, |
| attention_mask, |
| position_ids, |
                    None,  # past_key_value is not used under gradient checkpointing
| ) |
| else: |
| outputs = decoder_layer( |
| hidden_states, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| past_key_value=past_key_value, |
| output_attentions=output_attentions, |
| use_cache=use_cache, |
| ) |
|
|
| hidden_states = outputs[0] |
|
|
| if output_attentions: |
| all_attentions = all_attentions + (outputs[1],) |
|
|
| if use_cache: |
| present_key_values += (outputs[2 if output_attentions else 1],) |
|
|
| hidden_states = self.final_norm(hidden_states) |
|
|
        # Add the hidden states from the final layer (after the final norm).
| if output_hidden_states: |
| all_hidden_states += (hidden_states,) |
|
|
| present_key_values = present_key_values if use_cache else None |
| if not return_dict: |
| return tuple(v for v in [hidden_states, present_key_values, all_hidden_states, all_attentions] if v is not None) |
|
|
| return BaseModelOutputWithPast( |
| last_hidden_state=hidden_states, |
| past_key_values=present_key_values, |
| hidden_states=all_hidden_states, |
| attentions=all_attentions, |
| ) |
|
|
|
|
| class StableLMAlphaForCausalLM(StableLMAlphaPreTrainedModel): |
| _tied_weights_keys = ["lm_head.weight"] |
|
|
| def __init__(self, config: StableLMAlphaConfig): |
| super().__init__(config) |
|
|
| self.transformer = StableLMAlphaModel(config) |
| self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) |
|
|
| self.post_init() |
|
|
| def get_output_embeddings(self): |
| return self.lm_head |
|
|
| def set_output_embeddings(self, new_embeddings: nn.Module): |
| self.lm_head = new_embeddings |
|
|
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, CausalLMOutputWithPast]: |
| r""" |
| Example: |
| |
| ```python |
| >>> from transformers import AutoTokenizer, StableLMAlphaForCausalLM, StableLMAlphaConfig |
| >>> import torch |
| |
| >>> tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-base-alpha-3b-v2", trust_remote_code=True) |
| >>> config = StableLMAlphaConfig.from_pretrained("stabilityai/stablelm-base-alpha-3b-v2") |
| >>> config.is_decoder = True |
| >>> model = StableLMAlphaForCausalLM.from_pretrained("stabilityai/stablelm-base-alpha-3b-v2", config=config) |
| |
| >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") |
| >>> outputs = model(**inputs) |
| |
| >>> logits = outputs.logits |
| ``` |
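
        A minimal greedy-decoding sketch using the generic `generate` API (same checkpoint as above; the exact
        continuation depends on the model weights):

        ```python
        >>> tokens = model.generate(**inputs, max_new_tokens=32, do_sample=False)
        >>> tokenizer.decode(tokens[0], skip_special_tokens=True)
        ```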
| """ |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| outputs = self.transformer( |
| input_ids, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| inputs_embeds=inputs_embeds, |
| past_key_values=past_key_values, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| hidden_states = outputs[0] |
| logits = self.lm_head(hidden_states) |
|
|
| lm_loss = None |
| if labels is not None: |
            # Move labels to the logits device to support model parallelism.
| labels = labels.to(logits.device) |
            # Shift so that tokens < n predict token n (next-token prediction).
| shift_logits = logits[:, :-1, :].contiguous() |
| labels = labels[:, 1:].contiguous() |
| loss_fct = CrossEntropyLoss() |
| lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1)) |
|
|
| if not return_dict: |
| output = (logits,) + outputs[1:] |
| return ((lm_loss,) + output) if lm_loss is not None else output |
|
|
| return CausalLMOutputWithPast( |
| loss=lm_loss, |
| logits=logits, |
| past_key_values=outputs.past_key_values, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|
| def prepare_inputs_for_generation( |
| self, |
| input_ids, |
| past_key_values: Optional[torch.Tensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| inputs_embeds: Optional[torch.Tensor] = None, |
| **kwargs |
| ): |
        # When a cache is present, only the newest token needs to be fed to the model.
| if past_key_values and past_key_values[0] is not None: |
| input_ids = input_ids[:, -1:] |
|
|
| position_ids = kwargs.get("position_ids", None) |
| if attention_mask is not None and position_ids is None: |
            # Create position_ids on the fly from the attention mask (handles padded batches).
| position_ids = attention_mask.long().cumsum(-1) - 1 |
| position_ids.masked_fill_(attention_mask == 0, 1) |
| if past_key_values: |
| position_ids = position_ids[:, -1].unsqueeze(-1) |
|
|
        # If `inputs_embeds` are passed, only use them for the first generation step.
| if inputs_embeds is not None and past_key_values is None: |
| model_inputs = {"inputs_embeds": inputs_embeds} |
| else: |
| model_inputs = {"input_ids": input_ids} |
|
|
| model_inputs.update( |
| { |
| "attention_mask": attention_mask, |
| "past_key_values": past_key_values, |
| "position_ids": position_ids, |
| } |
| ) |
|
|
| return model_inputs |
|
|
    def _reorder_cache(self, past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.LongTensor):
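        # Reorder each layer's cached key/value tensors along the batch dimension to follow the selected beams.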
| reordered_past = () |
| for past_key_value in past_key_values: |
| reordered_past += ( |
| tuple(past_state.index_select(0, beam_idx) for past_state in past_key_value[:2]) + past_key_value[2:], |
| ) |
| return reordered_past |
|
|
|
|
| StableLMAlphaConfig.register_for_auto_class() |
| StableLMAlphaForCausalLM.register_for_auto_class("AutoModelForCausalLM") |
|
|