| from dataclasses import dataclass |
| from typing import Any, Callable, Optional, Union |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import torch_npu |
| from einops import rearrange |
|
|
| from transformers.cache_utils import Cache |
| from transformers.generation import GenerationMixin |
| from transformers.modeling_flash_attention_utils import FlashAttentionKwargs |
| from transformers.modeling_layers import GradientCheckpointingLayer |
| from transformers.modeling_outputs import ModelOutput |
| from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update |
| from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel |
| from transformers.processing_utils import Unpack |
| from transformers.utils import LossKwargs, auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging |
|
|
| from .configuration_openpangu_vl import OpenPanguVLConfig as OpenPanguConfig |
| from .configuration_openpangu_vl import OpenPanguVLTextConfig, OpenPanguVLVisionConfig |
| from .modeling_openpangu_embedded import PanguEmbeddedConfig, PanguEmbeddedMLP, PanguEmbeddedModel, PanguEmbeddedRMSNorm |
from .imageprocessor_openpangu_vl import rescale_and_normalize

logger = logging.get_logger(__name__)

# The fused NPU inference attention path is only used on Ascend 910-series devices.
if "910" in torch.npu.get_device_name():
    NPU_ATTN_INFR = True
    logger.info("Ascend 910-series NPU detected. Using NPU fused infer attention.")
else:
    NPU_ATTN_INFR = False
|
|
|
|
| class OpenPanguVLMLP(PanguEmbeddedMLP): |
| pass |
|
|
|
|
| class OpenPanguVisionPatchEmbed(nn.Module): |
| def __init__( |
| self, |
| patch_size: int = 14, |
| temporal_patch_size: int = 2, |
| in_channels: int = 3, |
| embed_dim: int = 1152, |
| ) -> None: |
| super().__init__() |
| self.patch_size = patch_size |
| self.temporal_patch_size = temporal_patch_size |
| self.in_channels = in_channels |
| self.embed_dim = embed_dim |
|
|
| kernel_size = [temporal_patch_size, patch_size, patch_size] |
| self.proj = nn.Conv3d(in_channels, embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=False) |
| self.input_size = self.patch_size * self.patch_size * in_channels * self.temporal_patch_size |
|
|
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        if hidden_states.shape[-1] != self.input_size:
            # Single-frame inputs are repeated along the temporal axis so that they match the expected
            # `temporal_patch_size` (the two-way concat below assumes `temporal_patch_size == 2`).
            patches = hidden_states.reshape(-1, self.patch_size * self.patch_size)
            hidden_states = torch.cat([patches, patches], dim=-1).reshape(-1, self.input_size)
| target_dtype = self.proj.weight.dtype |
| hidden_states = hidden_states.view( |
| -1, |
| self.in_channels, |
| self.temporal_patch_size, |
| self.patch_size, |
| self.patch_size, |
| ) |
| hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim) |
| return hidden_states |
|
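# Shape sketch for OpenPanguVisionPatchEmbed (illustrative, assuming the defaults above:
# patch_size=14, temporal_patch_size=2, in_channels=3, embed_dim=1152):
#   input  : (num_patches, 3 * 2 * 14 * 14) = (num_patches, 1176)
#   view   : (num_patches, 3, 2, 14, 14) -> Conv3d with kernel == stride == (2, 14, 14)
#   output : (num_patches, 1152)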
|
|
|
| class OpenPanguVLPatchEmbed(OpenPanguVisionPatchEmbed): |
| pass |
|
|
|
|
| class OpenPanguVisionRotaryEmbedding(nn.Module): |
| def __init__(self, dim: int, theta: float = 10000.0) -> None: |
| super().__init__() |
| inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim)) |
| self.register_buffer("inv_freq", inv_freq, persistent=False) |
|
|
| def forward(self, seqlen: int) -> torch.Tensor: |
| seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype) |
| freqs = torch.outer(seq, self.inv_freq) |
| return freqs |
|
|
|
|
| class OpenPanguRMSNorm(PanguEmbeddedRMSNorm): |
| pass |
|
|
|
|
| class OpenPanguVLPatchMerger(nn.Module): |
| def __init__(self, dim: int, context_dim: int, spatial_merge_size: int = 2) -> None: |
| super().__init__() |
| self.hidden_size = context_dim * (spatial_merge_size**2) |
| self.ln_q = OpenPanguRMSNorm(context_dim, eps=1e-6) |
| self.mlp = nn.Sequential( |
| nn.Linear(self.hidden_size, self.hidden_size), |
| nn.GELU(), |
| nn.Linear(self.hidden_size, dim), |
| ) |
|
|
| def forward(self, x: torch.Tensor) -> torch.Tensor: |
| x = self.mlp(self.ln_q(x).view(-1, self.hidden_size)) |
| return x |
|
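# Shape sketch for OpenPanguVLPatchMerger (illustrative): with spatial_merge_size=2 the merger fuses
# every group of spatial_merge_size**2 = 4 consecutive patch features (a 2x2 spatial block after the
# window reordering done in the vision tower):
#   (seq_len, context_dim) -> view (seq_len / 4, 4 * context_dim) -> MLP -> (seq_len / 4, dim)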
|
|
|
| def rotate_half(x): |
| """Rotates half the hidden dims of the input.""" |
| x1 = x[..., : x.shape[-1] // 2] |
| x2 = x[..., x.shape[-1] // 2 :] |
| return torch.cat((-x2, x1), dim=-1) |
|
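# Worked example for rotate_half (illustrative): for a last dimension of size 4,
# rotate_half([x1, x2, x3, x4]) == [-x3, -x4, x1, x2], i.e. feature i is paired with feature
# i + d/2 (GPT-NeoX style), so that (x * cos) + (rotate_half(x) * sin) performs the rotary rotation.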
|
|
|
| def apply_rotary_pos_emb_vision( |
| q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor |
| ) -> tuple[torch.Tensor, torch.Tensor]: |
| orig_q_dtype = q.dtype |
| orig_k_dtype = k.dtype |
| q, k = q.float(), k.float() |
| cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float() |
| q_embed = (q * cos) + (rotate_half(q) * sin) |
| k_embed = (k * cos) + (rotate_half(k) * sin) |
| q_embed = q_embed.to(orig_q_dtype) |
| k_embed = k_embed.to(orig_k_dtype) |
| return q_embed, k_embed |
|
|
|
|
| def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: |
| """ |
| This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, |
| num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) |
| """ |
| batch, num_key_value_heads, slen, head_dim = hidden_states.shape |
| if n_rep == 1: |
| return hidden_states |
| hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) |
| return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) |
|
|
|
|
| def eager_attention_forward( |
| module: nn.Module, |
| query: torch.Tensor, |
| key: torch.Tensor, |
| value: torch.Tensor, |
| attention_mask: Optional[torch.Tensor], |
| scaling: float, |
| dropout: float = 0.0, |
| **kwargs, |
| ): |
| key_states = repeat_kv(key, module.num_key_value_groups) |
| value_states = repeat_kv(value, module.num_key_value_groups) |
|
|
| attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling |
| if attention_mask is not None: |
| causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] |
| attn_weights = attn_weights + causal_mask |
|
|
| attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) |
| attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) |
| attn_output = torch.matmul(attn_weights, value_states) |
| attn_output = attn_output.transpose(1, 2).contiguous() |
|
|
| return attn_output, attn_weights |
|
|
|
|
| class OpenPanguVLVisionAttention(nn.Module): |
| def __init__(self, config: OpenPanguVLVisionConfig) -> None: |
| super().__init__() |
| self.dim = config.hidden_size |
| self.num_heads = config.num_heads |
| self.head_dim = self.dim // self.num_heads |
| self.num_key_value_groups = 1 |
| self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True) |
| self.proj = nn.Linear(self.dim, self.dim) |
| self.scaling = self.head_dim**-0.5 |
| self.config = config |
| self.attention_dropout = 0.0 |
| self.is_causal = False |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| cu_seqlens: torch.Tensor, |
| rotary_pos_emb: Optional[torch.Tensor] = None, |
| position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| **kwargs, |
| ) -> torch.Tensor: |
| seq_length = hidden_states.shape[0] |
| query_states, key_states, value_states = ( |
| self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0) |
| ) |
| if position_embeddings is None: |
| logger.warning_once( |
| "The attention layers in this model are transitioning from computing the RoPE embeddings internally " |
| "through `rotary_pos_emb` (2D tensor of RoPE theta values), to using externally computed " |
| "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.54 `rotary_pos_emb` will be " |
| "removed and `position_embeddings` will be mandatory." |
| ) |
| emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) |
| cos = emb.cos() |
| sin = emb.sin() |
| else: |
| cos, sin = position_embeddings |
| query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin) |
|
|
| query_states = query_states.transpose(0, 1).unsqueeze(0) |
| key_states = key_states.transpose(0, 1).unsqueeze(0) |
| value_states = value_states.transpose(0, 1).unsqueeze(0) |
| max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item() |
|
|
| attention_interface: Callable = eager_attention_forward |
| if self.config._attn_implementation != "eager": |
| attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] |
|
|
| if not self.training and NPU_ATTN_INFR: |
| if isinstance(cu_seqlens, torch.Tensor): |
| cu_seqlens = cu_seqlens.tolist() |
|
|
| q, k, v = [rearrange(x, "b n s d -> (b s) n d") for x in [query_states, key_states, value_states]] |
| attn_output = torch_npu.npu_fusion_attention( |
| q, |
| k, |
| v, |
| self.num_heads, |
| "TND", |
| pse=None, |
| padding_mask=None, |
| atten_mask=None, |
| scale=self.scaling, |
| pre_tockens=1048576, |
| next_tockens=0, |
| keep_prob=1.0, |
| inner_precise=0, |
| sparse_mode=0, |
| actual_seq_qlen=cu_seqlens, |
| actual_seq_kvlen=cu_seqlens, |
| )[0] |
| else: |
| attn_output, _ = attention_interface( |
| self, |
| query_states, |
| key_states, |
| value_states, |
| attention_mask=attention_mask, |
| dropout=0.0 if not self.training else self.attention_dropout, |
| scaling=self.scaling, |
| cu_seq_lens_q=cu_seqlens, |
| cu_seq_lens_k=cu_seqlens, |
| max_length_q=max_seqlen, |
| max_length_k=max_seqlen, |
| is_causal=False, |
| **kwargs, |
| ) |
|
|
| attn_output = attn_output.reshape(seq_length, -1).contiguous() |
| attn_output = self.proj(attn_output) |
| return attn_output |
|
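    # Note on the NPU branch above: `torch_npu.npu_fusion_attention` is invoked with the "TND" layout
    # (total_tokens, num_heads, head_dim) and takes the cumulative sequence lengths as
    # `actual_seq_qlen` / `actual_seq_kvlen`, playing the same role as the varlen `cu_seqlens`
    # arguments used by the flash-attention code path.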
|
|
|
| class OpenPanguVLVisionBlock(GradientCheckpointingLayer): |
| def __init__(self, config, attn_implementation: str = "sdpa") -> None: |
| super().__init__() |
| self.norm1 = OpenPanguRMSNorm(config.hidden_size, eps=1e-6) |
| self.norm2 = OpenPanguRMSNorm(config.hidden_size, eps=1e-6) |
| self.attn = OpenPanguVLVisionAttention(config=config) |
| self.mlp = OpenPanguVLMLP(config, bias=True) |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| cu_seqlens: torch.Tensor, |
| rotary_pos_emb: Optional[torch.Tensor] = None, |
| position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| **kwargs, |
| ) -> torch.Tensor: |
| hidden_states = hidden_states + self.attn( |
| self.norm1(hidden_states), |
| cu_seqlens=cu_seqlens, |
| rotary_pos_emb=rotary_pos_emb, |
| position_embeddings=position_embeddings, |
| attention_mask=attention_mask, |
| **kwargs, |
| ) |
| hidden_states = hidden_states + self.mlp(self.norm2(hidden_states)) |
| return hidden_states |
|
|
|
|
| @auto_docstring |
| class OpenPanguPreTrainedModel(PreTrainedModel): |
| config_class = OpenPanguConfig |
| base_model_prefix = "model" |
| supports_gradient_checkpointing = True |
| _no_split_modules = ["OpenPanguVLDecoderLayer", "OpenPanguVLVisionBlock"] |
| _skip_keys_device_placement = "past_key_values" |
| _supports_flash_attn_2 = True |
| _supports_sdpa = True |
| _supports_cache_class = True |
| _supports_static_cache = True |
| _supports_attention_backend = True |
|
|
| def _init_weights(self, module): |
| std = self.config.get_text_config().initializer_range |
| if isinstance(module, (nn.Linear, nn.Conv3d)): |
| module.weight.data.normal_(mean=0.0, std=std) |
| if module.bias is not None: |
| module.bias.data.zero_() |
| elif isinstance(module, nn.Embedding): |
| module.weight.data.normal_(mean=0.0, std=std) |
| if module.padding_idx is not None: |
| module.weight.data[module.padding_idx].zero_() |
| elif isinstance(module, OpenPanguRMSNorm): |
| module.weight.data.fill_(1.0) |
|
|
|
|
| class OpenPanguVisionTransformerPretrainedModel(OpenPanguPreTrainedModel): |
| config_class = OpenPanguVLVisionConfig |
| _no_split_modules = ["OpenPanguVLVisionBlock"] |
|
|
| def __init__(self, config, *inputs, **kwargs) -> None: |
| super().__init__(config, *inputs, **kwargs) |
| self.spatial_merge_size = config.spatial_merge_size |
| self.patch_size = config.patch_size |
| self.fullatt_block_indexes = config.fullatt_block_indexes |
| self.window_size = config.window_size |
| self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size |
| self.patch_embed = OpenPanguVLPatchEmbed( |
| patch_size=config.patch_size, |
| temporal_patch_size=config.temporal_patch_size, |
| in_channels=config.in_channels, |
| embed_dim=config.hidden_size, |
| ) |
| head_dim = config.hidden_size // config.num_heads |
| self.rotary_pos_emb = OpenPanguVisionRotaryEmbedding(head_dim // 2) |
| self.blocks = nn.ModuleList([OpenPanguVLVisionBlock(config) for _ in range(config.depth)]) |
| self.select_layer = getattr(config, "mm_unit_vision_select_layer", [-1, -3]) |
| self.select_index = [config.depth + i for i in self.select_layer] |
| self.select_index = self.select_index[::-1] |
| self.select_layer = [-1 * (i + 1) for i in range(len(self.select_index))] |
| self.merger = nn.ModuleList( |
| [ |
| OpenPanguVLPatchMerger( |
| dim=config.out_hidden_size, |
| context_dim=config.hidden_size, |
| spatial_merge_size=config.spatial_merge_size, |
| ) |
| for i in range(len(self.select_layer)) |
| ] |
| ) |
| self.gradient_checkpointing = False |
| self.take_indices = self.select_index |
| self.final_layernorm = OpenPanguRMSNorm(config.hidden_size, eps=1e-6) |
|
|
| def rot_pos_emb(self, grid_thw): |
| pos_ids = [] |
| for t, h, w in grid_thw: |
| hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) |
| hpos_ids = hpos_ids.reshape( |
| h // self.spatial_merge_size, |
| self.spatial_merge_size, |
| w // self.spatial_merge_size, |
| self.spatial_merge_size, |
| ) |
| hpos_ids = hpos_ids.permute(0, 2, 1, 3) |
| hpos_ids = hpos_ids.flatten() |
|
|
| wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) |
| wpos_ids = wpos_ids.reshape( |
| h // self.spatial_merge_size, |
| self.spatial_merge_size, |
| w // self.spatial_merge_size, |
| self.spatial_merge_size, |
| ) |
| wpos_ids = wpos_ids.permute(0, 2, 1, 3) |
| wpos_ids = wpos_ids.flatten() |
| pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) |
| pos_ids = torch.cat(pos_ids, dim=0) |
| max_grid_size = grid_thw[:, 1:].max() |
| rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size) |
| rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) |
| return rotary_pos_emb |
|
|
| def get_window_index(self, grid_thw): |
| window_index: list = [] |
| cu_window_seqlens: list = [0] |
| window_index_id = 0 |
| vit_merger_window_size = self.window_size // self.spatial_merge_size // self.patch_size |
|
|
| for grid_t, grid_h, grid_w in grid_thw: |
| llm_grid_h, llm_grid_w = ( |
| grid_h // self.spatial_merge_size, |
| grid_w // self.spatial_merge_size, |
| ) |
| index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w) |
| pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size |
| pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size |
| num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size |
| num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size |
| index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100) |
| index_padded = index_padded.reshape( |
| grid_t, |
| num_windows_h, |
| vit_merger_window_size, |
| num_windows_w, |
| vit_merger_window_size, |
| ) |
| index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape( |
| grid_t, |
| num_windows_h * num_windows_w, |
| vit_merger_window_size, |
| vit_merger_window_size, |
| ) |
| seqlens = (index_padded != -100).sum([2, 3]).reshape(-1) |
| index_padded = index_padded.reshape(-1) |
| index_new = index_padded[index_padded != -100] |
| window_index.append(index_new + window_index_id) |
| cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1] |
| cu_window_seqlens.extend(cu_seqlens_tmp.tolist()) |
| window_index_id += (grid_t * llm_grid_h * llm_grid_w).item() |
| window_index = torch.cat(window_index, dim=0) |
|
|
| return window_index, cu_window_seqlens |
|
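    # Summary of get_window_index (illustrative): `window_index` is a permutation of the merged-patch
    # positions that places patches of the same local attention window next to each other, and
    # `cu_window_seqlens` holds the cumulative window lengths in raw-patch units (hence the
    # multiplication by `spatial_merge_unit`); both are consumed by the window-attention blocks.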
|
    def _prepare_attention_mask(self, inputs_tensor: torch.Tensor, cu_seqlens: torch.Tensor) -> Optional[torch.Tensor]:
        # Flash attention consumes the variable-length layout described by `cu_seqlens` directly
        # and needs no dense additive mask.
        if self.config._attn_implementation == "flash_attention_2":
            return None
|
|
| seq_length = inputs_tensor.shape[0] |
| attention_mask = torch.full( |
| [1, 1, seq_length, seq_length], |
| torch.finfo(inputs_tensor.dtype).min, |
| device=inputs_tensor.device, |
| dtype=inputs_tensor.dtype, |
| ) |
| for i in range(1, len(cu_seqlens)): |
| attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = 0 |
| return attention_mask |
|
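    # Illustrative example: with cu_seqlens = [0, 3, 5] and seq_length = 5, the mask built above is
    # block-diagonal -- positions 0-2 attend only to each other, positions 3-4 attend only to each
    # other, and all cross-block entries stay at the dtype minimum (fully masked).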
|
| def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor: |
| """ |
| Args: |
| hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`): |
| The final hidden states of the model. |
| grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`): |
| The temporal, height and width of feature shape of each image in LLM. |
| |
| Returns: |
| `torch.Tensor`: hidden_states. |
| """ |
| hidden_states = self.patch_embed(hidden_states) |
| rotary_pos_emb = self.rot_pos_emb(grid_thw) |
| window_index, cu_window_seqlens = self.get_window_index(grid_thw) |
| cu_window_seqlens = torch.tensor( |
| cu_window_seqlens, |
| device=hidden_states.device, |
| dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, |
| ) |
| cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens) |
|
|
| seq_len, _ = hidden_states.size() |
| hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) |
| hidden_states = hidden_states[window_index, :, :] |
| hidden_states = hidden_states.reshape(seq_len, -1) |
| rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) |
| rotary_pos_emb = rotary_pos_emb[window_index, :, :] |
| rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1) |
| emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) |
| position_embeddings = (emb.cos(), emb.sin()) |
|
|
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
            dim=0,
            # flash-attention varlen kernels expect int32 cumulative lengths; keep grid_thw's dtype when tracing
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
| intermediates = [] |
| for layer_num, blk in enumerate(self.blocks): |
| if layer_num in self.fullatt_block_indexes: |
| cu_seqlens_now = cu_seqlens |
| else: |
| cu_seqlens_now = cu_window_seqlens |
|
|
| attention_mask = self._prepare_attention_mask(hidden_states, cu_seqlens_now) |
| hidden_states = blk( |
| hidden_states, |
| cu_seqlens=cu_seqlens_now, |
| position_embeddings=position_embeddings, |
| attention_mask=attention_mask, |
| **kwargs, |
| ) |
| if layer_num in self.take_indices: |
| ln_hs = self.final_layernorm(hidden_states) |
| intermediates.append(ln_hs) |
|
|
| image_embeddings_list = [] |
| for idx, sl in enumerate(self.select_layer): |
| image_embeddings_list.append(self.merger[idx](intermediates[sl])) |
| hidden_states = sum(image_embeddings_list) |
|
|
| reverse_indices = torch.argsort(window_index) |
| hidden_states = hidden_states[reverse_indices, :] |
|
|
| return hidden_states |
|
|
|
|
| @dataclass |
| @auto_docstring( |
| custom_intro=""" |
    Base class for OpenPanguVL model outputs, with hidden states and attentions.
| """ |
| ) |
| class OpenPanguVLModelOutputWithPast(ModelOutput): |
| r""" |
| past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): |
| Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape |
| `(batch_size, num_heads, sequence_length, embed_size_per_head)`) |
| |
| Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see |
| `past_key_values` input) to speed up sequential decoding. |
| rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): |
| The rope index difference between sequence length and multimodal rope. |
| """ |
|
|
| last_hidden_state: torch.FloatTensor = None |
| past_key_values: Optional[list[torch.FloatTensor]] = None |
| hidden_states: Optional[tuple[torch.FloatTensor]] = None |
| attentions: Optional[tuple[torch.FloatTensor]] = None |
| rope_deltas: Optional[torch.LongTensor] = None |
|
|
|
|
| class OpenPanguVLRotaryEmbedding(nn.Module): |
| def __init__(self, config: OpenPanguVLTextConfig, device=None): |
| super().__init__() |
| |
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
            self.mrope_interleaved = config.rope_scaling.get("mrope_interleaved", False)
        else:
            self.rope_type = "default"
            self.mrope_interleaved = False
| self.max_seq_len_cached = config.max_position_embeddings |
| self.original_max_seq_len = config.max_position_embeddings |
|
|
| self.config = config |
| self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] |
|
|
| inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) |
| self.register_buffer("inv_freq", inv_freq, persistent=False) |
| self.original_inv_freq = self.inv_freq |
|
|
        mrope_section = config.rope_scaling.get("mrope_section", None) if config.rope_scaling is not None else None
        self.mrope_section = mrope_section
        if self.mrope_interleaved:
            if not self.mrope_section:
                raise AssertionError("`mrope_section` cannot be None when interleaved mrope is used.")

            if len(mrope_section) == 2:
                h_num, w_num = mrope_section[0], mrope_section[1]
                mrope_dim = self.get_mrope_interleaved_id_list(h_num, w_num, 0)
            elif len(mrope_section) == 3:
                t_num, h_num, w_num = mrope_section[0], mrope_section[1], mrope_section[2]
                mrope_dim = self.get_mrope_interleaved_id_list(t_num, h_num, w_num, force_last=True)
            else:
                raise AssertionError("`mrope_section` must have length 2 or 3.")
            mrope_dim = mrope_dim * 2
            self.mrope_dim = mrope_dim
|
|
| @torch.no_grad() |
| @dynamic_rope_update |
| def forward(self, x, position_ids): |
| |
| |
| inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1) |
| position_ids_expanded = position_ids[:, :, None, :].float() |
|
|
| device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" |
| with torch.autocast(device_type=device_type, enabled=False): |
| freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3) |
| emb = torch.cat((freqs, freqs), dim=-1) |
| |
| if self.mrope_interleaved: |
| mrope_section_3d = [1] * len(self.mrope_dim) |
| mrope_dim = self.mrope_dim |
| emb = torch.cat([m[mrope_dim[i]] for i, m in enumerate(emb.split(mrope_section_3d, dim=-1))], dim=-1) |
|
|
| cos = emb.cos() * self.attention_scaling |
| sin = emb.sin() * self.attention_scaling |
| |
| if not self.mrope_interleaved and self.mrope_section: |
| mrope_section = self.mrope_section * 2 |
| cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1) |
| sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1) |
|
|
| return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) |
|
|
| @staticmethod |
| def get_mrope_interleaved_id_list(a: int, b: int, c: int, force_last: bool = False) -> list[int]: |
| """ |
| Generate an interleaved list of indices for multi-modal rotary embedding. |
| |
| Args: |
| a: Number of indices for first modality |
| b: Number of indices for second modality |
| c: Number of indices for third modality |
| force_last: Whether to force the last element to be from the first modality |
| |
| Returns: |
| List of interleaved indices |
| """ |
| if force_last: |
| a -= 1 |
|
|
| counts = {0: a, 1: b, 2: c} |
| placed = dict.fromkeys(counts, 0) |
| rem = counts.copy() |
| seq: list[int] = [] |
| last = None |
|
|
| total = a + b + c |
| for _ in range(total): |
            # Prefer a modality different from the one that was placed last.
            cands = [k for k in rem if rem[k] > 0 and k != last]
            if not cands:
                # Only one modality still has remaining slots, so a repeat is unavoidable.
                cands = [k for k in rem if rem[k] > 0]

            # Pick the modality that is currently most under-represented relative to its quota.
            best = min(cands, key=lambda k: (placed[k] / counts[k], k))
|
|
| seq.append(best) |
| placed[best] += 1 |
| rem[best] -= 1 |
| last = best |
|
|
| if force_last: |
| seq.append(0) |
|
|
| return seq |
|
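    # Illustrative examples of the interleaving rule above (values follow from the balancing logic):
    #   get_mrope_interleaved_id_list(2, 2, 2)                  -> [0, 1, 2, 0, 1, 2]
    #   get_mrope_interleaved_id_list(3, 2, 2, force_last=True) -> [0, 1, 2, 0, 1, 2, 0]
    # Each entry assigns a rotary channel to one of the (temporal, height, width) frequency groups,
    # spreading the groups as evenly as possible across the head dimension.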
|
|
|
| def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1): |
| """Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/). |
| |
| Explanation: |
| Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding |
| sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For |
| vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately. |
| Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding. |
| For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal, |
| height and width) of text embedding is always the same, so the text embedding rotary position embedding has no |
| difference with modern LLMs. |
| |
| Args: |
| q (`torch.Tensor`): The query tensor. |
| k (`torch.Tensor`): The key tensor. |
| cos (`torch.Tensor`): The cosine part of the rotary embedding. |
| sin (`torch.Tensor`): The sine part of the rotary embedding. |
        mrope_section (`list[int]`):
| Multimodal rope section is for channel dimension of temporal, height and width in rope calculation. |
| unsqueeze_dim (`int`, *optional*, defaults to 1): |
| The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and |
| sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note |
| that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and |
| k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes |
| cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have |
| the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. |
| Returns: |
| `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. |
| """ |
| mrope_section = mrope_section * 2 |
| cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze( |
| unsqueeze_dim |
| ) |
| sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze( |
| unsqueeze_dim |
| ) |
|
|
| q_embed = (q * cos) + (rotate_half(q) * sin) |
| k_embed = (k * cos) + (rotate_half(k) * sin) |
| return q_embed, k_embed |
|
|
|
|
| class OpenPanguVLAttention(nn.Module): |
| """ |
| Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer |
| and "Generating Long Sequences with Sparse Transformers". |
| """ |
|
|
| def __init__(self, config: OpenPanguVLTextConfig, layer_idx: Optional[int] = None): |
| super().__init__() |
| self.config = config |
| self.layer_idx = layer_idx |
| if layer_idx is None: |
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a "
                "`layer_idx` when creating this class."
            )
|
|
| self.hidden_size = config.hidden_size |
| self.num_heads = config.num_attention_heads |
| self.head_dim = self.hidden_size // self.num_heads |
| self.num_key_value_heads = config.num_key_value_heads |
| self.num_key_value_groups = self.num_heads // self.num_key_value_heads |
| self.is_causal = True |
| self.attention_dropout = config.attention_dropout |
| self.rope_scaling = config.rope_scaling |
| self.scaling = self.head_dim**-0.5 |
|
|
| if (self.head_dim * self.num_heads) != self.hidden_size: |
| raise ValueError( |
| f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" |
| f" and `num_heads`: {self.num_heads})." |
| ) |
| self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True) |
| self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) |
| self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) |
| self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) |
| self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None |
| self.rotary_emb = OpenPanguVLRotaryEmbedding(config=config) |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| past_key_value: Optional[Cache] = None, |
| output_attentions: bool = False, |
| use_cache: bool = False, |
| cache_position: Optional[torch.LongTensor] = None, |
| position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, |
| **kwargs: Unpack[FlashAttentionKwargs], |
| ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: |
| bsz, q_len, _ = hidden_states.size() |
|
|
| query_states = self.q_proj(hidden_states) |
| key_states = self.k_proj(hidden_states) |
| value_states = self.v_proj(hidden_states) |
|
|
| query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) |
| key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) |
| value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) |
|
|
| cos, sin = position_embeddings |
| query_states, key_states = apply_multimodal_rotary_pos_emb( |
| query_states, key_states, cos, sin, self.rope_scaling["mrope_section"] |
| ) |
|
|
| if past_key_value is not None: |
| cache_kwargs = { |
| "sin": sin, |
| "cos": cos, |
| "cache_position": cache_position, |
| } |
| key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) |
|
|
| attention_interface: Callable = eager_attention_forward |
| if self.config._attn_implementation != "eager": |
| attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] |
|
|
| attn_output, attn_weights = attention_interface( |
| self, |
| query_states, |
| key_states, |
| value_states, |
| attention_mask, |
| dropout=0.0 if not self.training else self.attention_dropout, |
| scaling=self.scaling, |
| sliding_window=self.sliding_window, |
| **kwargs, |
| ) |
|
|
| attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() |
| attn_output = self.o_proj(attn_output) |
| return attn_output, attn_weights, past_key_value |
|
|
|
|
| class OpenPanguVLDecoderLayer(GradientCheckpointingLayer): |
| def __init__(self, config: OpenPanguVLTextConfig, layer_idx: int): |
| super().__init__() |
| self.hidden_size = config.hidden_size |
|
|
| if config.use_sliding_window and config._attn_implementation != "flash_attention_2": |
| logger.warning_once( |
| f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; " |
| "unexpected results may be encountered." |
| ) |
| self.self_attn = OpenPanguVLAttention(config, layer_idx) |
| self.mlp = OpenPanguVLMLP(config) |
| self.input_layernorm = OpenPanguRMSNorm(config.hidden_size, eps=config.rms_norm_eps) |
| self.post_attention_layernorm = OpenPanguRMSNorm(config.hidden_size, eps=config.rms_norm_eps) |
| self.attention_type = config.layer_types[layer_idx] |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| past_key_value: Optional[tuple[torch.Tensor]] = None, |
| output_attentions: Optional[bool] = False, |
| use_cache: Optional[bool] = False, |
| cache_position: Optional[torch.LongTensor] = None, |
| position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, |
| **kwargs: Unpack[FlashAttentionKwargs], |
| ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: |
| """ |
| Args: |
| hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` |
| attention_mask (`torch.FloatTensor`, *optional*): attention mask of size |
| `(batch, sequence_length)` where padding elements are indicated by 0. |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| returned tensors for more detail. |
| use_cache (`bool`, *optional*): |
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding |
| (see `past_key_values`). |
| past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states |
| cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): |
| Indices depicting the position of the input sequence tokens in the sequence. |
| position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): |
| Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, |
| with `head_dim` being the embedding dimension of each attention head. |
| kwargs (`dict`, *optional*): |
| Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code |
| into the model |
| """ |
|
|
| residual = hidden_states |
|
|
| hidden_states = self.input_layernorm(hidden_states) |
|
|
| |
| hidden_states, self_attn_weights, present_key_value = self.self_attn( |
| hidden_states=hidden_states, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| past_key_value=past_key_value, |
| output_attentions=output_attentions, |
| use_cache=use_cache, |
| cache_position=cache_position, |
| position_embeddings=position_embeddings, |
| **kwargs, |
| ) |
| hidden_states = residual + hidden_states |
|
|
| |
| residual = hidden_states |
| hidden_states = self.post_attention_layernorm(hidden_states) |
| hidden_states = self.mlp(hidden_states) |
| hidden_states = residual + hidden_states |
|
|
| outputs = (hidden_states,) |
|
|
| if output_attentions: |
| outputs += (self_attn_weights,) |
|
|
| if use_cache: |
| outputs += (present_key_value,) |
|
|
| return outputs |
|
|
|
|
| class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ... |
|
|
|
|
| class ProjectionSingle(nn.Module): |
| def __init__(self, i_hidden_size: int, t_hidden_size: int): |
| super().__init__() |
| self.act = F.silu |
| self.fc1 = nn.Linear(i_hidden_size, t_hidden_size, bias=True) |
|
|
| def forward(self, hidden_states): |
| x = self.act(hidden_states) |
| return self.fc1(x) |
|
|
|
|
| @auto_docstring |
| class OpenPanguVLTextModel(PanguEmbeddedModel): |
| def __init__(self, config: PanguEmbeddedConfig): |
| super().__init__(config) |
| self.rotary_emb = OpenPanguVLRotaryEmbedding(config=config) |
|
|
|
|
| @auto_docstring |
| class OpenPanguVLModel(OpenPanguPreTrainedModel): |
| base_model_prefix = "" |
| _checkpoint_conversion_mapping = {"^model": "language_model"} |
| config_class = OpenPanguConfig |
| _no_split_modules = ["OpenPanguVLDecoderLayer", "OpenPanguVLVisionBlock"] |
|
|
| def __init__(self, config): |
| super().__init__(config) |
| self.visual = OpenPanguVisionTransformerPretrainedModel._from_config(config.vision_config) |
| self.language_model = OpenPanguVLTextModel(config.text_config) |
|
|
| self.rope_deltas = None |
|
|
| self.visual.vision_projection = ProjectionSingle(config.vision_config.out_hidden_size, config.hidden_size) |
|
|
| |
| self.post_init() |
| self._parse_preprocess_params(self.config.vision_config) |
|
|
| def _parse_preprocess_params(self, vision_config): |
| self.channel = vision_config.in_channels |
| self.patch_size = vision_config.patch_size |
| from transformers import AutoProcessor |
| processor = AutoProcessor.from_pretrained(self.config.name_or_path, trust_remote_code=True, local_files_only=True) |
| self.do_rescale = processor.image_processor.do_rescale |
| self.rescale_factor = processor.image_processor.rescale_factor |
| self.do_normalize = processor.image_processor.do_normalize |
| self.image_mean = tuple(processor.image_processor.image_mean) |
| self.image_std = tuple(processor.image_processor.image_std) |
|
|
| def get_input_embeddings(self): |
| return self.language_model.get_input_embeddings() |
|
|
| def set_input_embeddings(self, value): |
| self.language_model.set_input_embeddings(value) |
|
|
| def set_decoder(self, decoder): |
| self.language_model = decoder |
|
|
| def get_decoder(self): |
| return self.language_model |
|
|
| def get_rope_index( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| image_grid_thw: Optional[torch.LongTensor] = None, |
| video_grid_thw: Optional[torch.LongTensor] = None, |
| second_per_grid_ts: Optional[torch.Tensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| ) -> tuple[torch.Tensor, torch.Tensor]: |
| """ |
| Calculate the 3D rope index based on image and video's temporal, height and width in LLM. |
| |
| Explanation: |
| Each embedding sequence contains vision embedding and text embedding or just contains text embedding. |
| |
| For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs. |
| Examples: |
| input_ids: [T T T T T], here T is for text. |
| temporal position_ids: [0, 1, 2, 3, 4] |
| height position_ids: [0, 1, 2, 3, 4] |
| width position_ids: [0, 1, 2, 3, 4] |
| |
| For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part |
| and 1D rotary position embedding for text part. |
| Examples: |
| Temporal (Time): 3 patches, representing different segments of the video in time. |
| Height: 2 patches, dividing each frame vertically. |
| Width: 2 patches, dividing each frame horizontally. |
| We also have some important parameters: |
| fps (Frames Per Second): The video's frame rate, set to 1. |
| This means one frame is processed each second. |
| tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" |
| are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens |
| per second. So each second of the video will be represented with 25 separate time points. |
| It essentially defines the temporal granularity. |
| temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames. |
| interval: The step size for the temporal position IDs, calculated as |
| tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. |
                This means that each temporal patch will have a difference of 50 in the temporal position IDs.
| input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision. |
| vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100] |
| vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1] |
| vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] |
| text temporal position_ids: [101, 102, 103, 104, 105] |
| text height position_ids: [101, 102, 103, 104, 105] |
| text width position_ids: [101, 102, 103, 104, 105] |
| Here we calculate the text start position_ids as the max vision position_ids plus 1. |
| |
| Args: |
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| Indices of input sequence tokens in the vocabulary. |
| Padding will be ignored by default should you provide it. |
| image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): |
| The temporal, height and width of feature shape of each image in LLM. |
| video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): |
| The temporal, height and width of feature shape of each video in LLM. |
| second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*): |
| The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs. |
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| Returns: |
| position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) |
| mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) |
| """ |
| spatial_merge_size = self.config.vision_config.spatial_merge_size |
| image_token_id = self.config.image_token_id |
| video_token_id = self.config.video_token_id |
| vision_start_token_id = self.config.vision_start_token_id |
| vision_end_token_id = self.config.vision_end_token_id |
| tokens_per_second = getattr(self.config, "tokens_per_second", 1.0) |
| mrope_position_deltas = [] |
| if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): |
| total_input_ids = input_ids |
| if attention_mask is None: |
| attention_mask = torch.ones_like(total_input_ids) |
| position_ids = torch.ones( |
| 3, |
| input_ids.shape[0], |
| input_ids.shape[1], |
| dtype=input_ids.dtype, |
| device=input_ids.device, |
| ) |
| attention_mask = attention_mask.to(total_input_ids.device) |
| for i, input_ids in enumerate(total_input_ids): |
| input_ids = input_ids[attention_mask[i] == 1] |
| input_tokens = input_ids.tolist() |
| src_item = input_tokens |
| video_idx = 0 |
| image_idx = 0 |
| new_src_item: list[int] = [] |
| llm_pos_ids_list: list[torch.Tensor] = [] |
|
|
| idx = 0 |
| while idx < len(src_item): |
| new_src_item_len = len(new_src_item) |
| start_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 |
| if src_item[idx] not in [video_token_id, image_token_id]: |
| new_src_item.append(src_item[idx]) |
| llm_pos_ids = torch.tensor([start_idx], dtype=torch.long).expand(3, -1) |
| llm_pos_ids_list.append(llm_pos_ids.to(position_ids.device)) |
| elif src_item[idx] == image_token_id: |
| grid_t = image_grid_thw[image_idx][0] |
| grid_hs = image_grid_thw[:, 1] |
| grid_ws = image_grid_thw[:, 2] |
| t_index = (torch.arange(grid_t) * 1 * tokens_per_second).long() |
| llm_pos_ids = self._get_llm_pos_ids_for_vision( |
| start_idx, image_idx, spatial_merge_size, t_index, grid_hs, grid_ws |
| ) |
| llm_pos_ids_list.append(llm_pos_ids.to(position_ids.device)) |
| vision_seqlen = image_grid_thw[image_idx].prod() // (spatial_merge_size**2) |
| new_src_item.extend([image_token_id] * vision_seqlen) |
| image_idx += 1 |
                else:
                    # Video tokens: each frame receives its own (height, width) grid of position ids and
                    # frames are delimited by vision start / end tokens that advance the position counter.
                    T = video_grid_thw[video_idx][0].item()
                    H = video_grid_thw[video_idx][1].item()
                    W = video_grid_thw[video_idx][2].item()
                    llm_H = H // spatial_merge_size
                    llm_W = W // spatial_merge_size
                    tokens_per_frame = llm_H * llm_W

                    t_index_all = (torch.arange(T)).long()

                    start_pos = llm_pos_ids_list[-1].max().item() + 1 if llm_pos_ids_list else 0
                    current_pos = start_pos

                    final_frame_time = T - 1
                    for t in range(T):
                        # Every frame except the first is preceded by a vision start token.
                        if t != 0:
                            new_src_item.append(vision_start_token_id)
                            bot_pos = torch.full((3, 1), current_pos, dtype=torch.long)
                            llm_pos_ids_list.append(bot_pos.to(position_ids.device))
                            current_pos += 1

                        # (height, width) position ids for this frame; the temporal component is constant
                        # within a frame (all zeros before the `current_pos` offset is added).
                        grid_h = torch.arange(llm_H).view(-1, 1).expand(-1, llm_W).flatten()
                        grid_w = torch.arange(llm_W).view(1, -1).expand(llm_H, -1).flatten()

                        frame_pos = torch.stack(
                            [
                                torch.full_like(grid_h, 0, dtype=torch.long),
                                grid_h,
                                grid_w,
                            ]
                        )
                        frame_pos_with_offset = frame_pos + current_pos
                        new_src_item.extend([video_token_id] * tokens_per_frame)
                        llm_pos_ids_list.append(frame_pos_with_offset.to(position_ids.device))
                        current_pos += max(llm_H, llm_W)

                        # Every frame except the last is followed by a vision end token.
                        if t != final_frame_time:
                            new_src_item.append(vision_end_token_id)
                            eot_pos = torch.full((3, 1), current_pos, dtype=torch.long)
                            llm_pos_ids_list.append(eot_pos.to(position_ids.device))
                            current_pos += 1
                    video_idx += 1

                idx += len(new_src_item) - new_src_item_len
| llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) |
| position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device) |
| mrope_position_delta = llm_positions.max() + 1 - len(src_item) |
| mrope_position_deltas.append(mrope_position_delta) |
| mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1) |
| return position_ids, mrope_position_deltas |
| else: |
| if attention_mask is not None: |
| position_ids = attention_mask.long().cumsum(-1) - 1 |
| position_ids.masked_fill_(attention_mask == 0, 1) |
| position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) |
| max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] |
| mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1] |
| else: |
| position_ids = ( |
| torch.arange(input_ids.shape[1], device=input_ids.device) |
| .view(1, 1, -1) |
| .expand(3, input_ids.shape[0], -1) |
| ) |
| mrope_position_deltas = torch.zeros( |
| [input_ids.shape[0], 1], |
| device=input_ids.device, |
| dtype=input_ids.dtype, |
| ) |
|
|
| return position_ids, mrope_position_deltas |
|
|
| def _get_llm_pos_ids_for_vision( |
| self, |
| start_idx: int, |
| vision_idx: int, |
| spatial_merge_size: int, |
| t_index: list[int], |
| grid_hs: torch.Tensor, |
| grid_ws: torch.Tensor, |
| ) -> torch.Tensor: |
| llm_pos_ids_list = [] |
| llm_grid_h = grid_hs[vision_idx] // spatial_merge_size |
| llm_grid_w = grid_ws[vision_idx] // spatial_merge_size |
| h_index = ( |
| torch.arange(llm_grid_h) |
| .to(llm_grid_h.device) |
| .view(1, -1, 1) |
| .expand(len(t_index), -1, llm_grid_w) |
| .flatten() |
| ) |
| w_index = ( |
| torch.arange(llm_grid_w) |
| .to(llm_grid_h.device) |
| .view(1, 1, -1) |
| .expand(len(t_index), llm_grid_h, -1) |
| .flatten() |
| ) |
| t_index_tensor = ( |
| torch.Tensor(t_index) |
| .to(llm_grid_h.device) |
| .view(-1, 1) |
| .expand(-1, llm_grid_h * llm_grid_w) |
| .long() |
| .flatten() |
| ) |
| _llm_pos_ids = torch.stack([t_index_tensor, h_index, w_index]) |
| llm_pos_ids_list.append(_llm_pos_ids + start_idx) |
| llm_pos_ids = torch.cat(llm_pos_ids_list, dim=1) |
| return llm_pos_ids |
|
|
| def get_video_features( |
| self, |
| pixel_values_videos: torch.FloatTensor, |
| video_grid_thw: Optional[torch.LongTensor] = None, |
| ): |
| """ |
| Encodes videos into continuous embeddings that can be forwarded to the language model. |
| |
| Args: |
| pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): |
| The tensors corresponding to the input videos. |
| video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): |
| The temporal, height and width of feature shape of each video in LLM. |
| """ |
| pixel_values_videos = pixel_values_videos.type(self.visual.dtype) |
| video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw) |
|
|
| video_embeds = self.visual.vision_projection(video_embeds) |
|
|
| split_sizes = (video_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist() |
| video_embeds = torch.split(video_embeds, split_sizes) |
| return video_embeds |
|
|
| def get_image_features( |
| self, |
| pixel_values: torch.FloatTensor, |
| image_grid_thw: Optional[torch.LongTensor] = None, |
| ): |
| """ |
| Encodes images into continuous embeddings that can be forwarded to the language model. |
| |
| Args: |
| pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): |
| The tensors corresponding to the input images. |
| image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): |
| The temporal, height and width of feature shape of each image in LLM. |
| """ |
| pixel_values = pixel_values.type(self.visual.dtype) |
| |
| pixel_values = pixel_values.reshape(-1, self.channel, self.patch_size, self.patch_size) |
| pixel_values = rescale_and_normalize(pixel_values, self.do_rescale, self.rescale_factor, self.do_normalize, |
| self.image_mean, self.image_std) |
| pixel_values = pixel_values.reshape(-1, self.channel * self.patch_size * self.patch_size) |
| image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw) |
|
|
| image_embeds = self.visual.vision_projection(image_embeds) |
|
|
| split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist() |
| image_embeds = torch.split(image_embeds, split_sizes) |
| return image_embeds |
|
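    # Illustrative example (assuming patch_size=14 and spatial_merge_size=2): a single 224x224 image
    # gives image_grid_thw = [[1, 16, 16]], i.e. 1 * 16 * 16 = 256 patches; after the vision tower and
    # the 2x2 patch merger the returned tuple holds one tensor with 256 / 4 = 64 rows, already projected
    # to the language model's hidden size by `vision_projection`.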
|
| @auto_docstring |
| def forward( |
| self, |
| input_ids: torch.LongTensor = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[list[torch.FloatTensor]] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| pixel_values: Optional[torch.Tensor] = None, |
| pixel_values_videos: Optional[torch.FloatTensor] = None, |
| image_grid_thw: Optional[torch.LongTensor] = None, |
| video_grid_thw: Optional[torch.LongTensor] = None, |
| rope_deltas: Optional[torch.LongTensor] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| second_per_grid_ts: Optional[torch.Tensor] = None, |
| **kwargs: Unpack[KwargsForCausalLM], |
| ) -> Union[tuple, OpenPanguVLModelOutputWithPast]: |
| r""" |
        pixel_values_videos (`torch.FloatTensor` of shape `(seq_length, num_channels * temporal_size * image_size * image_size)`):
| The tensors corresponding to the input videos. Pixel values can be obtained using |
| [`AutoImageProcessor`]. See [`OpenPanguVLImageProcessor.__call__`] for details. [`OpenPanguVLProcessor`] uses |
| [`OpenPanguVLImageProcessor`] for processing videos. |
| image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): |
| The temporal, height and width of feature shape of each image in LLM. |
| video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): |
| The temporal, height and width of feature shape of each video in LLM. |
| rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): |
| The rope index difference between sequence length and multimodal rope. |
| second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*): |
| The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs. |
| """ |
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.get_input_embeddings()(input_ids) |
| if pixel_values is not None: |
| image_embeds = self.get_image_features(pixel_values, image_grid_thw) |
| image_embeds = torch.cat(image_embeds, dim=0) |
| n_image_tokens = (input_ids == self.config.image_token_id).sum() |
| n_image_features = image_embeds.shape[0] |
| if not is_torchdynamo_compiling() and n_image_tokens != n_image_features: |
| raise ValueError( |
| "Image features and image tokens do not match: " |
| f"tokens: {n_image_tokens}, features {n_image_features}" |
| ) |
|
|
| mask = input_ids == self.config.image_token_id |
| mask_unsqueezed = mask.unsqueeze(-1) |
| mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) |
| image_mask = mask_expanded.to(inputs_embeds.device) |
|
|
| image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype) |
| inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) |
|
|
| if pixel_values_videos is not None: |
| video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw) |
| video_embeds = torch.cat(video_embeds, dim=0) |
| n_video_tokens = (input_ids == self.config.video_token_id).sum() |
| n_video_features = video_embeds.shape[0] |
| if not is_torchdynamo_compiling() and n_video_tokens != n_video_features: |
| raise ValueError( |
| "Video features and video tokens do not match: " |
| f"tokens: {n_video_tokens}, features {n_video_features}" |
| ) |
|
|
| mask = input_ids == self.config.video_token_id |
| mask_unsqueezed = mask.unsqueeze(-1) |
| mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) |
| video_mask = mask_expanded.to(inputs_embeds.device) |
|
|
| video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype) |
| inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) |
|
|
| if position_ids is None: |
| attention_mask_tensor = ( |
| attention_mask if not isinstance(attention_mask, dict) else attention_mask["full_attention"] |
| ) |
| if attention_mask_tensor is not None and attention_mask_tensor.ndim == 4: |
| attention_mask_tensor = torch.diagonal(attention_mask_tensor[:, 0], dim1=1, dim2=2) |
| attention_mask_tensor = attention_mask_tensor / torch.finfo(attention_mask_tensor.dtype).min |
| attention_mask_tensor = (1.0 - attention_mask_tensor).int() |
|
|
            # The 3D RoPE index only needs to be (re)computed in the prefill stage. Under torch.compile
            # tensor values cannot be inspected, so a sequence length greater than 1 is used as a proxy
            # for prefill; otherwise an empty or absent KV cache identifies it.
            prefill_compiled_stage = is_torchdynamo_compiling() and (
                (input_ids is not None and input_ids.shape[1] != 1)
                or (inputs_embeds is not None and inputs_embeds.shape[1] != 1)
            )
            prefill_noncompiled_stage = not is_torchdynamo_compiling() and (
                (cache_position is not None and cache_position[0] == 0)
                or (past_key_values is None or past_key_values.get_seq_length() == 0)
            )
| if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None: |
| position_ids, rope_deltas = self.get_rope_index( |
| input_ids, |
| image_grid_thw, |
| video_grid_thw, |
| second_per_grid_ts=second_per_grid_ts, |
| attention_mask=attention_mask_tensor, |
| ) |
| self.rope_deltas = rope_deltas |
            else:
                # Reuse the rope_deltas computed at prefill to shift the 1D positions during decoding.
                batch_size, seq_length, _ = inputs_embeds.shape
| delta = ( |
| (cache_position[0] + self.rope_deltas).to(inputs_embeds.device) |
| if cache_position is not None |
| else 0 |
| ) |
| position_ids = torch.arange(seq_length, device=inputs_embeds.device) |
| position_ids = position_ids.view(1, -1).expand(batch_size, -1) |
| if cache_position is not None: |
| delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0) |
| position_ids = position_ids.add(delta) |
| position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) |
|
|
| outputs = self.language_model( |
| input_ids=None, |
| position_ids=position_ids, |
| attention_mask=attention_mask, |
| past_key_values=past_key_values, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=True, |
| cache_position=cache_position, |
| **kwargs, |
| ) |
|
|
| output = OpenPanguVLModelOutputWithPast( |
| last_hidden_state=outputs.last_hidden_state, |
| past_key_values=outputs.past_key_values, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| rope_deltas=self.rope_deltas, |
| ) |
| return output if return_dict else output.to_tuple() |
|
|
|
|
| @dataclass |
| @auto_docstring( |
| custom_intro=""" |
| Base class for OpenPanguVL causal language model (or autoregressive) outputs. |
| """ |
| ) |
| class OpenPanguVLCausalLMOutputWithPast(ModelOutput): |
| r""" |
| loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): |
| Language modeling loss (for next-token prediction). |
| logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): |
| Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
| past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): |
| Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape |
| `(batch_size, num_heads, sequence_length, embed_size_per_head)`) |
| |
| Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see |
| `past_key_values` input) to speed up sequential decoding. |
| rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): |
| The rope index difference between sequence length and multimodal rope. |
| """ |
|
|
| loss: Optional[torch.FloatTensor] = None |
| logits: Optional[torch.FloatTensor] = None |
| past_key_values: Optional[list[torch.FloatTensor]] = None |
| hidden_states: Optional[tuple[torch.FloatTensor]] = None |
| attentions: Optional[tuple[torch.FloatTensor]] = None |
| rope_deltas: Optional[torch.LongTensor] = None |
|
|
|
|
| class OpenPanguVL(OpenPanguPreTrainedModel, GenerationMixin): |
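| # Remap legacy checkpoint keys: a top-level "visual" module moves under "model.visual", and any other |
| # top-level "model" weights move under "model.language_model". |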
| _checkpoint_conversion_mapping = { |
| "^visual": "model.visual", |
| r"^model(?!\.(language_model|visual))": "model.language_model", |
| } |
| _tied_weights_keys = ["lm_head.weight"] |
|
|
| def __init__(self, config): |
| super().__init__(config) |
| self.model = OpenPanguVLModel(config) |
| self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) |
|
|
| self.post_init() |
|
|
| def get_input_embeddings(self): |
| return self.model.get_input_embeddings() |
|
|
| def set_input_embeddings(self, value): |
| self.model.set_input_embeddings(value) |
|
|
| def get_output_embeddings(self): |
| return self.lm_head |
|
|
| def set_output_embeddings(self, new_embeddings): |
| self.lm_head = new_embeddings |
|
|
| def set_decoder(self, decoder): |
| self.model.set_decoder(decoder) |
|
|
| def get_decoder(self): |
| return self.model.get_decoder() |
|
|
| def get_video_features( |
| self, |
| pixel_values_videos: torch.FloatTensor, |
| video_grid_thw: Optional[torch.LongTensor] = None, |
| ): |
| return self.model.get_video_features(pixel_values_videos, video_grid_thw) |
|
|
| def get_image_features( |
| self, |
| pixel_values: torch.FloatTensor, |
| image_grid_thw: Optional[torch.LongTensor] = None, |
| ): |
| return self.model.get_image_features(pixel_values, image_grid_thw) |
|
|
| |
| @property |
| def language_model(self): |
| return self.model.language_model |
|
|
| @property |
| def visual(self): |
| return self.model.visual |
|
|
| @can_return_tuple |
| @auto_docstring |
| def forward( |
| self, |
| input_ids: torch.LongTensor = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[list[torch.FloatTensor]] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| pixel_values: Optional[torch.Tensor] = None, |
| pixel_values_videos: Optional[torch.FloatTensor] = None, |
| image_grid_thw: Optional[torch.LongTensor] = None, |
| video_grid_thw: Optional[torch.LongTensor] = None, |
| rope_deltas: Optional[torch.LongTensor] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| second_per_grid_ts: Optional[torch.Tensor] = None, |
| **kwargs: Unpack[KwargsForCausalLM], |
| ) -> Union[tuple, OpenPanguVLCausalLMOutputWithPast]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., |
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored |
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. |
| pixel_values_videos (`torch.FloatTensor` of shape `(seq_length, |
| num_channels * temporal_size * image_size * image_size)`): |
| The tensors corresponding to the input videos. Pixel values can be obtained using |
| [`AutoImageProcessor`]. See [`OpenPanguVLImageProcessor.__call__`] for details. [`OpenPanguVLProcessor`] uses |
| [`OpenPanguVLImageProcessor`] for processing videos. |
| image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): |
| The temporal, height and width of feature shape of each image in LLM. |
| video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): |
| The temporal, height and width of feature shape of each video in LLM. |
| rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): |
| The rope index difference between sequence length and multimodal rope. |
| second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*): |
| The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs. |
| |
| Example: |
| |
| ```python |
| >>> from PIL import Image |
| >>> import requests |
| >>> from transformers import AutoProcessor, OpenPanguVL |
| |
| >>> model = OpenPanguVL.from_pretrained("Pangu/Pangu_7B_V5_VL_HF_vllm_ascend") |
| >>> processor = AutoProcessor.from_pretrained("Pangu/Pangu_7B_V5_VL_HF_vllm_ascend") |
| |
| >>> messages = [ |
| { |
| "role": "user", |
| "content": [ |
| {"type": "image"}, |
| {"type": "text", "text": "What is shown in this image?"}, |
| ], |
| }, |
| ] |
| >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example image; any accessible URL works |
| >>> image = Image.open(requests.get(url, stream=True).raw) |
| |
| >>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) |
| >>> inputs = processor(text=[text], images=[image], return_tensors="pt") |
| |
| >>> # Generate |
| >>> generate_ids = model.generate(**inputs, max_new_tokens=30) |
| >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] |
| "The image shows a street scene with a red stop sign in the foreground. |
| In the background, there is a large red gate with Chinese characters ..." |
| ```""" |
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
|
|
| outputs = self.model( |
| input_ids=input_ids, |
| pixel_values=pixel_values, |
| pixel_values_videos=pixel_values_videos, |
| image_grid_thw=image_grid_thw, |
| video_grid_thw=video_grid_thw, |
| second_per_grid_ts=second_per_grid_ts, |
| position_ids=position_ids, |
| attention_mask=attention_mask, |
| past_key_values=past_key_values, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=True, |
| cache_position=cache_position, |
| **kwargs, |
| ) |
|
|
| hidden_states = outputs[0] |
| logits = self.lm_head(hidden_states) |
|
|
| loss = None |
| if labels is not None: |
| loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size) |
|
|
| return OpenPanguVLCausalLMOutputWithPast( |
| loss=loss, |
| logits=logits, |
| past_key_values=outputs.past_key_values, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| rope_deltas=outputs.rope_deltas, |
| ) |
|
|
| def prepare_inputs_for_generation( |
| self, |
| input_ids, |
| past_key_values=None, |
| attention_mask=None, |
| inputs_embeds=None, |
| cache_position=None, |
| position_ids=None, |
| use_cache=True, |
| pixel_values=None, |
| pixel_values_videos=None, |
| image_grid_thw=None, |
| video_grid_thw=None, |
| second_per_grid_ts=None, |
| **kwargs, |
| ): |
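| # Overridden from GenerationMixin: vision tensors only need to be forwarded on the prefill step; |
| # later decoding steps read the merged multimodal context from the KV cache. |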
| |
|
|
| model_inputs = super().prepare_inputs_for_generation( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| inputs_embeds=inputs_embeds, |
| cache_position=cache_position, |
| position_ids=position_ids, |
| pixel_values=pixel_values, |
| pixel_values_videos=pixel_values_videos, |
| image_grid_thw=image_grid_thw, |
| video_grid_thw=video_grid_thw, |
| second_per_grid_ts=second_per_grid_ts, |
| use_cache=use_cache, |
| **kwargs, |
| ) |
|
|
| |
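| # position_ids are rebuilt inside OpenPanguVLModel.forward from the cached rope_deltas, so drop |
| # whatever the base implementation prepared. |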
| model_inputs["position_ids"] = None |
|
|
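| # After prefill (cache_position[0] != 0) the vision features are already merged into the cache, |
| # so the raw pixel inputs are no longer needed. |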
| if cache_position[0] != 0: |
| model_inputs["pixel_values"] = None |
| model_inputs["pixel_values_videos"] = None |
|
|
| return model_inputs |
|
|
| def _get_image_nums_and_video_nums( |
| self, |
| input_ids: Optional[torch.LongTensor], |
| ) -> tuple[torch.Tensor, torch.Tensor]: |
| """ |
| Count the images and videos in each sample so that the flattened visual tensors can be split back |
| into per-sample chunks. The counts are derived from `input_ids` rather than passed through the |
| processor, which keeps this method independent of processor interface changes. |
| |
| Args: |
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| Indices of input sequence tokens in the vocabulary. |
| |
| Returns: |
| image_nums (`torch.LongTensor` of shape `(batch_size,)`): number of images in each sample |
| video_nums (`torch.LongTensor` of shape `(batch_size,)`): number of videos in each sample |
| """ |
| image_token_id = self.config.image_token_id |
| video_token_id = self.config.video_token_id |
| vision_start_token_id = self.config.vision_start_token_id |
|
|
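| # vision_first_mask marks the token immediately following each vision_start token; matching it |
| # against the image/video token ids counts image vs. video blocks per sample. |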
| vision_start_mask = input_ids == vision_start_token_id |
| vision_first_mask = torch.roll(vision_start_mask, shifts=1, dims=1) |
| image_mask = input_ids == image_token_id |
| video_mask = input_ids == video_token_id |
| image_nums = torch.sum(vision_first_mask & image_mask, dim=1) |
| video_nums = torch.sum(vision_first_mask & video_mask, dim=1) |
|
|
| return image_nums, video_nums |
|
|
| def _expand_inputs_for_generation( |
| self, |
| expand_size: int = 1, |
| is_encoder_decoder: bool = False, |
| input_ids: Optional[torch.LongTensor] = None, |
| **model_kwargs, |
| ) -> tuple[torch.LongTensor, dict[str, Any]]: |
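| # Overridden: visual tensors are flattened across the batch (no leading batch dimension), so they |
| # are split into per-sample chunks and each chunk is repeated, rather than repeat_interleave'd. |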
| |
| |
| |
| |
|
|
| if expand_size == 1: |
| return input_ids, model_kwargs |
|
|
| visual_keys = [ |
| "pixel_values", |
| "image_grid_thw", |
| "pixel_values_videos", |
| "video_grid_thw", |
| "second_per_grid_ts", |
| ] |
|
|
| def _expand_dict_for_generation_visual(dict_to_expand): |
| image_grid_thw = model_kwargs.get("image_grid_thw", None) |
| video_grid_thw = model_kwargs.get("video_grid_thw", None) |
| image_nums, video_nums = self._get_image_nums_and_video_nums(input_ids) |
|
|
| def _repeat_interleave_samples(x, lengths, repeat_times): |
| samples = torch.split(x, lengths) |
| repeat_args = [repeat_times] + [1] * (x.dim() - 1) |
| result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0) |
| return result |
|
|
| for key in dict_to_expand: |
| if key == "pixel_values": |
| |
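| # Split grid_thw by per-sample image counts; each sample's patch length is the sum of t*h*w over |
| # its images, which is used to slice the flat pixel_values tensor per sample. |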
| samples = torch.split(image_grid_thw, list(image_nums)) |
| |
| lengths = [torch.prod(sample, dim=1).sum() for sample in samples] |
| dict_to_expand[key] = _repeat_interleave_samples( |
| dict_to_expand[key], lengths=lengths, repeat_times=expand_size |
| ) |
| elif key == "image_grid_thw": |
| |
| lengths = list(image_nums) |
| dict_to_expand[key] = _repeat_interleave_samples( |
| dict_to_expand[key], lengths=lengths, repeat_times=expand_size |
| ) |
| elif key == "pixel_values_videos": |
| samples = torch.split(video_grid_thw, list(video_nums)) |
| lengths = [torch.prod(sample, dim=1).sum() for sample in samples] |
| dict_to_expand[key] = _repeat_interleave_samples( |
| dict_to_expand[key], lengths=lengths, repeat_times=expand_size |
| ) |
| elif key == "video_grid_thw": |
| lengths = list(video_nums) |
| dict_to_expand[key] = _repeat_interleave_samples( |
| dict_to_expand[key], lengths=lengths, repeat_times=expand_size |
| ) |
| elif key == "second_per_grid_ts": |
| if not isinstance(dict_to_expand[key], list): |
| raise TypeError( |
| f"Expected value for key '{key}' to be a list,but got {type(dict_to_expand[key])} instead." |
| ) |
| tensor = torch.tensor(dict_to_expand[key]) |
| lengths = list(video_nums) |
| tensor = _repeat_interleave_samples(tensor, lengths=lengths, repeat_times=expand_size) |
| dict_to_expand[key] = tensor.tolist() |
| return dict_to_expand |
|
|
| def _expand_dict_for_generation(dict_to_expand): |
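| # Remaining (non-visual) tensors carry a leading batch dimension and can be expanded with a plain |
| # repeat_interleave; cache_position and visual keys are left untouched here. |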
| for key in dict_to_expand: |
| if key != "cache_position": |
| if ( |
| dict_to_expand[key] is not None |
| and isinstance(dict_to_expand[key], torch.Tensor) |
| and key not in visual_keys |
| ): |
| dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0) |
| return dict_to_expand |
|
|
| |
| |
| |
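| # Visual inputs can only be expanded when input_ids are available, since the per-sample image and |
| # video counts are derived from them. |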
| if input_ids is not None and input_ids.numel() != 0: |
| model_kwargs = _expand_dict_for_generation_visual(model_kwargs) |
|
|
| if input_ids is not None: |
| input_ids = input_ids.repeat_interleave(expand_size, dim=0) |
|
|
| model_kwargs = _expand_dict_for_generation(model_kwargs) |
|
|
| if is_encoder_decoder: |
| if model_kwargs.get("encoder_outputs") is None: |
| raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") |
| model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"]) |
|
|
| return input_ids, model_kwargs |
|
|
|
|
| __all__ = ["OpenPanguVL", "OpenPanguVLModel", "OpenPanguPreTrainedModel"] |
|
|