import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
import transformers
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
                          LlamaTokenizer)
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, logging

from .configuration_pvc_internvl import PVCInternVLConfig
from .conversation import get_conv_template
from .modeling_intern_vit import InternVisionModel, has_flash_attn
from .modeling_intern_vit_pvc import (AdaLayerNorm, InternVisionTemporalModel,
                                      Timesteps, temporal_idx_abs_to_rel)
from .modeling_internlm2 import InternLM2ForCausalLM

logger = logging.get_logger(__name__)

def version_cmp(v1, v2, op='eq'):
    import operator

    from packaging import version
    op_func = getattr(operator, op)
    return op_func(version.parse(v1), version.parse(v2))

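# Usage sketch for version_cmp (hypothetical values): version_cmp('4.37.2',
# '4.37.0', 'ge') evaluates operator.ge(version.parse('4.37.2'),
# version.parse('4.37.0')) and returns True.
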
class AdaLNMLP(nn.Module):
    """MLP projector whose LayerNorm is adaptively conditioned on the input
    features and, optionally, on a per-frame timestep embedding."""

    def __init__(self, input_dim, output_dim, use_temporal_condition=False,
                 use_rel_timestep=False, rel_timestep_scale=100):
        super().__init__()
        self.condition_proj = nn.Sequential(
            nn.Linear(input_dim, input_dim),
            nn.SiLU(),
            nn.Linear(input_dim, input_dim)
        )
        self.use_temporal_condition = use_temporal_condition
        self.use_rel_timestep = use_rel_timestep
        self.rel_timestep_scale = rel_timestep_scale

        if use_temporal_condition:
            # Sinusoidal timestep embedding, projected up to the input width.
            self.time_embed = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
            self.time_proj = nn.Sequential(
                nn.Linear(256, input_dim),
                nn.SiLU(),
                nn.Linear(input_dim, input_dim)
            )

        self.adaln = AdaLayerNorm(input_dim, input_dim)
        self.mlp = nn.Sequential(
            nn.Linear(input_dim, output_dim),
            nn.GELU(),
            nn.Linear(output_dim, output_dim)
        )
        self.gradient_checkpointing = False

    def forward(self, x, split_sizes, temporal_id=None):
        condition = self.condition_proj(x)
        if self.use_temporal_condition:
            t = temporal_id
            if self.use_rel_timestep:
                # Convert absolute frame indices into per-video relative
                # positions before embedding them.
                t = temporal_idx_abs_to_rel(temporal_id, split_sizes)
                t = t * self.rel_timestep_scale
            t_embed = self.time_embed(t)
            t_embed = self.time_proj(t_embed.to(x.dtype))
            # Broadcast one timestep embedding over all tokens of a frame.
            condition = condition + t_embed.unsqueeze(1)
        x = self.adaln(x, condition)
        x = self.mlp(x)
        return x

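# Shape sketch for AdaLNMLP (illustrative sizes, not from a shipped config):
#   mlp = AdaLNMLP(4096, 2048, use_temporal_condition=True, use_rel_timestep=True)
#   x = torch.randn(16, 256, 4096)        # (frames, tokens per frame, dim)
#   temporal_id = torch.arange(16)        # absolute frame indices
#   out = mlp(x, split_sizes=[16], temporal_id=temporal_id)   # -> (16, 256, 2048)
# One timestep embedding per frame is broadcast over that frame's tokens.
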
def build_projector_module(config: PVCInternVLConfig):
    vit_hidden_size = config.vision_config.hidden_size
    llm_hidden_size = config.llm_config.hidden_size

    if config.mlp_add_ops is not None and 'adaln' in config.mlp_add_ops:
        # AdaLN projector, optionally conditioned on (relative) frame timesteps.
        mlp_input_dim = vit_hidden_size * int(1 / config.downsample_ratio) ** 2
        use_temporal_condition = ('temporal' in config.mlp_add_ops)
        use_rel_timestep = ('rel' in config.mlp_add_ops)
        mlp1 = AdaLNMLP(mlp_input_dim, llm_hidden_size,
                        use_temporal_condition=use_temporal_condition,
                        use_rel_timestep=use_rel_timestep)
    else:
        # Plain InternVL-style two-layer MLP projector.
        mlp1 = nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / config.downsample_ratio) ** 2),
            nn.Linear(vit_hidden_size * int(1 / config.downsample_ratio) ** 2, llm_hidden_size),
            nn.GELU(),
            nn.Linear(llm_hidden_size, llm_hidden_size)
        )
    return mlp1

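# Dimension sketch (assuming vit_hidden_size=1024, downsample_ratio=0.5):
# pixel shuffle packs a 2x2 patch neighbourhood into one token, so the
# projector input width is 1024 * int(1 / 0.5) ** 2 = 4096 features per token,
# mapped down to llm_hidden_size.
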
def forward_projector(projector, x, **kwargs):
    # The plain nn.Sequential projector takes no conditioning kwargs;
    # AdaLNMLP consumes split_sizes / temporal_id.
    if isinstance(projector, nn.Sequential):
        return projector(x)
    return projector(x, **kwargs)

class PVCInternVLModel(PreTrainedModel):
    config_class = PVCInternVLConfig
    main_input_name = 'pixel_values'
    base_model_prefix = 'language_model'
    _supports_flash_attn_2 = True
    _no_split_modules = ['InternVisionModel', 'InternVisionTemporalModel',
                         'LlamaDecoderLayer', 'InternLM2DecoderLayer']

    def __init__(self, config: PVCInternVLConfig, vision_model=None, language_model=None,
                 delay_init_new_param=False, use_flash_attn=True):
        super().__init__(config)

        assert version_cmp(transformers.__version__, '4.37.0', 'ge')
        image_size = config.force_image_size or config.vision_config.image_size
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.select_layer = config.select_layer
        self.template = config.template
        # Number of LLM tokens produced per image/frame after pixel shuffle.
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
        self.num_frame_token = self.num_image_token
        self.downsample_ratio = config.downsample_ratio
        self.ps_version = config.ps_version
        use_flash_attn = use_flash_attn if has_flash_attn else False
        config.vision_config.use_flash_attn = use_flash_attn
        config.llm_config.attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'

        logger.info(f'num_image_token: {self.num_image_token}')
        logger.info(f'num_frame_token: {self.num_frame_token}')
        logger.info(f'ps_version: {self.ps_version}')

        if vision_model is not None:
            self.vision_model = vision_model
        elif config.use_temporal:
            self.vision_model = InternVisionTemporalModel(config.vision_config,
                                                          delay_init_new_param=delay_init_new_param)
        else:
            self.vision_model = InternVisionModel(config.vision_config)

        if language_model is not None:
            self.language_model = language_model
        elif config.llm_config.architectures[0] == 'LlamaForCausalLM':
            self.language_model = LlamaForCausalLM(config.llm_config)
        elif config.llm_config.architectures[0] == 'InternLM2ForCausalLM':
            self.language_model = InternLM2ForCausalLM(config.llm_config)
        else:
            raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')

        self.mlp1 = build_projector_module(config)

        self.img_context_token_id = None
        self.conv_template = get_conv_template(self.template)
        self.system_message = self.conv_template.system_message

    def forward(
            self,
            pixel_values: torch.FloatTensor,
            input_ids: torch.LongTensor = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            image_flags: Optional[torch.LongTensor] = None,
            split_sizes: Optional[torch.LongTensor] = None,
            temporal_id: Optional[torch.LongTensor] = None,
            past_key_values: Optional[List[torch.FloatTensor]] = None,
            labels: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        image_flags = image_flags.squeeze(-1)
        input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()

        vit_embeds = self.extract_feature(pixel_values, split_sizes=split_sizes, temporal_id=temporal_id)
        # Keep only the vision embeddings that correspond to real images
        # (padding images carry image_flags == 0).
        vit_embeds = vit_embeds[image_flags == 1]
        vit_batch_size = pixel_values.shape[0]

        B, N, C = input_embeds.shape
        input_embeds = input_embeds.reshape(B * N, C)

        if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
            print(f'dynamic ViT batch size: {vit_batch_size}, '
                  f'images per sample: {vit_batch_size / B}, dynamic token length: {N}')

        # Overwrite the <IMG_CONTEXT> placeholder embeddings with vision features.
        input_ids = input_ids.reshape(B * N)
        selected = (input_ids == self.img_context_token_id)
        try:
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
        except Exception as e:
            # Shape mismatch (e.g. truncated sequences): fill as many
            # placeholders as exist and drop surplus vision tokens.
            vit_embeds = vit_embeds.reshape(-1, C)
            print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
                  f'vit_embeds.shape={vit_embeds.shape}')
            n_token = selected.sum()
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]

        input_embeds = input_embeds.reshape(B, N, C)

        outputs = self.language_model(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict token n.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens.
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism.
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

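    # Placeholder-count sketch (hypothetical numbers): a prompt containing one
    # image with num_image_token = 256 must carry exactly 256 <IMG_CONTEXT>
    # tokens, so selected.sum() equals vit_embeds.reshape(-1, C).shape[0] and
    # the assignment above lines up one vision token per placeholder.
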
    def pixel_shuffle(self, x, scale_factor=0.5):
        n, w, h, c = x.size()
        # N, W, H, C --> N, W, H * scale, C // scale
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
        x = x.permute(0, 2, 1, 3).contiguous()
        # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
        x = x.view(n, int(h * scale_factor), int(w * scale_factor),
                   int(c / (scale_factor * scale_factor)))
        if self.ps_version == 'v1':
            warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
                          'which results in a transposed image.')
        else:
            x = x.permute(0, 2, 1, 3).contiguous()
        return x

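    # Shape sketch (scale_factor=0.5, illustrative sizes): (n, 32, 32, 1024)
    # -> view -> (n, 32, 16, 2048) -> permute -> (n, 16, 32, 2048)
    # -> view -> (n, 16, 16, 4096). Spatial resolution halves along each axis
    # while every output channel vector packs a 2x2 neighbourhood.
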
    def extract_feature(self, pixel_values, split_sizes=None, temporal_id=None):
        kwargs = {}
        if self.config.use_temporal:
            # The temporal ViT needs to know how the flattened batch of frames
            # splits into individual videos.
            if split_sizes is not None:
                if isinstance(split_sizes, torch.Tensor):
                    split_sizes = split_sizes.tolist()
            else:
                split_sizes = [pixel_values.shape[0]]
            assert sum(split_sizes) == pixel_values.shape[0]
            kwargs['split_sizes'] = split_sizes
            kwargs['temporal_id'] = temporal_id

        if self.select_layer == -1:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=False,
                return_dict=True,
                **kwargs
            ).last_hidden_state
        else:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=True,
                return_dict=True,
                **kwargs
            ).hidden_states[self.select_layer]
        # Drop the [CLS] token, keeping patch tokens only.
        vit_embeds = vit_embeds[:, 1:, :]

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = forward_projector(self.mlp1, vit_embeds, split_sizes=split_sizes, temporal_id=temporal_id)
        return vit_embeds

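    # Token-count sketch (assuming image_size=448, patch_size=14,
    # downsample_ratio=0.5): the ViT yields (448 // 14) ** 2 = 1024 patch
    # tokens per frame; pixel shuffle compresses them to 1024 * 0.5 ** 2 = 256
    # tokens, matching self.num_image_token.
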
    def batch_chat(self, tokenizer, pixel_values, questions, generation_config, split_sizes=None, data_flag=None,
                   num_patches_list=None, history=None, return_history=False, IMG_START_TOKEN='<img>',
                   IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
        if history is not None or return_history:
            print('Multi-turn chat is not yet supported in batch_chat.')
            raise NotImplementedError

        if image_counts is not None:
            num_patches_list = image_counts
            print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        queries = []
        for idx, num_patches in enumerate(num_patches_list):
            question = questions[idx]
            if pixel_values is not None and '<image>' not in question:
                question = '<image>\n' + question
            template = get_conv_template(self.template)
            template.system_message = self.system_message
            template.append_message(template.roles[0], question)
            template.append_message(template.roles[1], None)
            query = template.get_prompt()

            # Expand the <image> placeholder into the per-image token span.
            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)
            queries.append(query)

        tokenizer.padding_side = 'left'
        model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            split_sizes=split_sizes,
            **generation_config
        )
        responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
        responses = [response.split(template.sep)[0].strip() for response in responses]
        return responses

    def chat(self, tokenizer, pixel_values, question, generation_config, num_patches_list=None,
             split_sizes=None, data_flag=None, history=None, return_history=False,
             IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False):
        # data_flag semantics: 0 = text-only, 1/2 = image, 3 = video.
        flag = data_flag[0].item() if data_flag is not None else 1

        if history is None and pixel_values is not None and '<image>' not in question:
            question = '<image>\n' + question

        if num_patches_list is None:
            num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
        assert pixel_values is None or len(pixel_values) == sum(num_patches_list)

        # Default: one temporal index per tile; with 'cycle' tiling, all tiles
        # of the same image share that image's temporal index.
        temporal_id = None
        if pixel_values is not None:
            temporal_id = torch.arange(len(pixel_values), dtype=torch.long, device=pixel_values.device)
            if self.config.tile_repeat_way == 'cycle':
                new_temporal_id = []
                for tid, n_tile in enumerate(num_patches_list):
                    new_temporal_id.append(torch.tensor([tid] * n_tile, dtype=torch.long, device=pixel_values.device))
                temporal_id = torch.cat(new_temporal_id)

        if pixel_values is not None and flag in (1, 2) and self.config.image_repeat_time > 1:
            # Repeat each image so the temporal ViT sees a short pseudo-video.
            if self.config.tile_repeat_way == 'cycle':
                cur_st = 0
                new_pixel_values, new_temporal_id = [], []
                for img_idx, n_tile in enumerate(num_patches_list):
                    image = pixel_values[cur_st:cur_st + n_tile]
                    new_pixel_values.append(torch.cat([image for _ in range(self.config.image_repeat_time)], dim=0))
                    new_temporal_id.append(torch.arange(img_idx * self.config.image_repeat_time,
                                                        (img_idx + 1) * self.config.image_repeat_time,
                                                        dtype=torch.long,
                                                        device=temporal_id.device).repeat_interleave(n_tile, dim=0))
                    cur_st += n_tile
                new_pixel_values = torch.cat(new_pixel_values, dim=0)
                new_temporal_id = torch.cat(new_temporal_id, dim=0)
                assert cur_st == len(pixel_values)
                assert len(new_pixel_values) == len(new_temporal_id) == len(pixel_values) * self.config.image_repeat_time
                pixel_values, temporal_id = new_pixel_values, new_temporal_id
            else:
                pixel_values = pixel_values.repeat_interleave(self.config.image_repeat_time, dim=0)
                temporal_id = torch.arange(len(pixel_values), dtype=torch.long, device=pixel_values.device)
            split_sizes = [s * self.config.image_repeat_time for s in split_sizes] if split_sizes is not None else None
            num_patches_list = [n * self.config.image_repeat_time for n in num_patches_list]

        if pixel_values is not None and flag == 3 and self.config.video_repeat_time > 1:
            # Repeat each video frame analogously.
            pixel_values = pixel_values.repeat_interleave(self.config.video_repeat_time, dim=0)
            if self.config.tile_repeat_way == 'cycle':
                new_temporal_id = []
                for img_idx, n_tile in enumerate(num_patches_list):
                    new_temporal_id.append(torch.arange(img_idx * self.config.video_repeat_time,
                                                        (img_idx + 1) * self.config.video_repeat_time,
                                                        dtype=torch.long,
                                                        device=temporal_id.device).repeat_interleave(n_tile, dim=0))
                temporal_id = torch.cat(new_temporal_id, dim=0)
            else:
                temporal_id = torch.arange(len(pixel_values), dtype=torch.long, device=pixel_values.device)
            split_sizes = [s * self.config.video_repeat_time for s in split_sizes] if split_sizes is not None else None
            num_patches_list = [n * self.config.video_repeat_time for n in num_patches_list]

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        template = get_conv_template(self.template)
        template.system_message = self.system_message
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())

        history = [] if history is None else history
        for (old_question, old_answer) in history:
            template.append_message(template.roles[0], old_question)
            template.append_message(template.roles[1], old_answer)
        template.append_message(template.roles[0], question)
        template.append_message(template.roles[1], None)
        query = template.get_prompt()

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        for num_patches in num_patches_list:
            if flag == 0:
                num_image_token = 0
            elif flag in (1, 2):
                num_image_token = self.num_image_token * num_patches
            else:
                num_image_token = self.num_frame_token * num_patches
            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * num_image_token + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)

        model_inputs = tokenizer(query, return_tensors='pt')
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            split_sizes=split_sizes,
            temporal_id=temporal_id,
            **generation_config
        )
        response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
        response = response.split(template.sep.strip())[0].strip()
        history.append((question, response))
        if return_history:
            return response, history
        else:
            query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
            query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
            if verbose:
                print(query_to_print, response)
            return response

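    # Usage sketch (hypothetical inputs; generation_config keys are standard
    # HF generate kwargs):
    #   generation_config = dict(max_new_tokens=128, do_sample=False)
    #   response = model.chat(tokenizer, pixel_values, 'Describe the video.',
    #                         generation_config,
    #                         num_patches_list=[1] * num_frames,
    #                         data_flag=torch.tensor([3]))
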
    @torch.no_grad()
    def generate(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            input_ids: Optional[torch.LongTensor] = None,
            attention_mask: Optional[torch.LongTensor] = None,
            visual_features: Optional[torch.FloatTensor] = None,
            generation_config: Optional[GenerationConfig] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            split_sizes: Optional[torch.LongTensor] = None,
            temporal_id: Optional[torch.LongTensor] = None,
            **generate_kwargs,
    ) -> torch.LongTensor:

        assert self.img_context_token_id is not None
        if pixel_values is not None:
            if visual_features is not None:
                # Reuse precomputed vision features if the caller provides them.
                vit_embeds = visual_features
            else:
                vit_embeds = self.extract_feature(pixel_values, split_sizes=split_sizes, temporal_id=temporal_id)
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            # Overwrite <IMG_CONTEXT> placeholder embeddings with vision features.
            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.img_context_token_id)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            use_cache=True,
            **generate_kwargs,
        )
        return outputs