import torch
from torch import Tensor
from torch.amp import autocast
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer

from model.build import MODEL_REGISTRY, BaseModel
from modules.build import build_module
from optim.utils import no_decay_param_group


def last_token_pool(last_hidden_states: Tensor,
                    attention_mask: Tensor) -> Tensor:
    """Pool per-token hidden states down to one embedding per sequence.

    With left padding every sequence ends at the final position, so that
    position can be taken directly; with right padding the last non-padding
    token is looked up per sequence via the attention mask.
    """
    left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
    if left_padding:
        return last_hidden_states[:, -1]
    sequence_lengths = attention_mask.sum(dim=1) - 1
    batch_size = last_hidden_states.shape[0]
    return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]
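# A minimal usage sketch for `last_token_pool`; the shapes below are
# illustrative assumptions, not values used elsewhere in this model:
#
#   hidden = torch.randn(4, 128, 512)            # (batch, seq_len, dim)
#   mask = torch.ones(4, 128, dtype=torch.long)  # 1 = real token, 0 = padding
#   pooled = last_token_pool(hidden, mask)       # -> (4, 512)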


@MODEL_REGISTRY.register()
class OpenVocab(BaseModel):
    def __init__(self, cfg):
        super().__init__(cfg)
        self.cfg = cfg
        # Path or hub id of the fg-clip-base checkpoint backing the point-map encoder.
        model_root = "fg-clip-base"
        self.pm_encoder = AutoModelForCausalLM.from_pretrained(model_root, trust_remote_code=True)

        if cfg.mode in ['warmup', 'pretrain']:
            # A fully frozen copy of the same checkpoint supplies the reference
            # text and RGB features during warmup/pretraining.
            self.frozen_model = AutoModelForCausalLM.from_pretrained(model_root, trust_remote_code=True)
            self.use_scene_cap = self.cfg.data.args.get("use_scene_cap", False)
            self.set_training_mode()
        else:
            # Downstream tasks swap in a Jina CLIP text encoder and tokenizer.
            self.text_encoder = AutoModel.from_pretrained('jinaai/jina-clip-v2', trust_remote_code=True)
            self.tokenizer = AutoTokenizer.from_pretrained('jinaai/jina-clip-v2', trust_remote_code=True)
            # Ask the text tower for per-token outputs in addition to the pooled embedding.
            self.text_encoder.text_model.output_tokens = True
            self.set_downstream_mode()

        # Instantiate every configured head (e.g. qa_head, ground_head) as an attribute.
        self.head_list = self.cfg.model.heads.head_list
        for head in self.head_list:
            setattr(self, head, build_module("heads", getattr(self.cfg.model.heads, head)))
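        # The heads config is assumed to look roughly like this (hypothetical
        # layout; the real schema lives in this repo's config files):
        #
        #   model:
        #     heads:
        #       head_list: [qa_head, ground_head]
        #       qa_head: {...}
        #       ground_head: {...}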

    def set_training_mode(self):
        """Freeze the reference model entirely and the text tower of pm_encoder."""
        for param in self.frozen_model.parameters():
            param.requires_grad = False

        # Only the vision tower of the point-map encoder stays trainable.
        for name, param in self.pm_encoder.named_parameters():
            if "text_model" in name:
                param.requires_grad = False

        self.pm_encoder.train()
        self.frozen_model.eval()

    def set_downstream_mode(self):
        """Freeze pm_encoder entirely; train only the text tower of the Jina encoder."""
        for param in self.pm_encoder.parameters():
            param.requires_grad = False

        for name, param in self.text_encoder.named_parameters():
            if "vision_model" in name:
                param.requires_grad = False

        self.pm_encoder.eval()
        self.text_encoder.train()

    def forward(self, data_dict, mode=None):
        """Run one of three passes: 'warmup' and 'pretrain' align the point-map
        encoder against the frozen reference; 'qa' is the downstream path."""
        # Default step counters so downstream consumers can rely on the keys.
        if 'cur_step' not in data_dict:
            data_dict['cur_step'] = 1
            data_dict['total_steps'] = 1

        # CLIP-style temperature for contrastive logits.
        data_dict['logit_scale'] = self.pm_encoder.logit_scale.exp()
        if mode == "warmup":
            B, C, H, W = data_dict["images"].shape
            # Point maps arrive channels-last; move channels first for the encoder.
            data_dict["point_map"] = data_dict["point_map"].to(torch.bfloat16, non_blocking=True).permute(0, 3, 1, 2)
            data_dict["txt_ids"] = data_dict["txt_ids"].view(B, -1)
            with autocast("cuda", dtype=torch.bfloat16):
                pm = data_dict["point_map"]
                # get_image_features returns a pair; the second element holds
                # the features used here.
                _, data_dict["inter_view_pm_embed"] = self.pm_encoder.get_image_features(pm)
                # Reference features need no gradients.
                with torch.no_grad():
                    data_dict["inter_view_txt_embed"] = self.frozen_model.get_text_features(data_dict["txt_ids"])
                    _, data_dict["inter_view_rgb_embed"] = self.frozen_model.get_image_features(data_dict["images"])
        elif mode == 'pretrain':
            pm_basic_features = []
            # (B, V, H, W, C) -> (B, V, C, H, W): channels-first per view.
            data_dict['point_map'] = data_dict['point_map'].to(torch.bfloat16, non_blocking=True).permute(0, 1, 4, 2, 3)
            for i in range(data_dict['point_map'].shape[0]):
                with autocast("cuda", dtype=torch.bfloat16):
                    pm = data_dict['point_map'][i]
                    _, pm_feat = self.pm_encoder.get_image_features(pm)
                    pm_basic_features.append(pm_feat)

            data_dict['inter_view_pm_embed'] = torch.stack(pm_basic_features, dim=0)
            # Scene-level point-map embedding: average over the view dimension.
            data_dict['scene_pm_embed'] = data_dict['inter_view_pm_embed'].mean(dim=1)
            B_txt = data_dict['txt_ids'].shape[0]
            # Preallocate per-scene buffers sized to the reference encoder's
            # output here: 32 views, 512-dim embeddings.
            lang_basic_features = torch.empty((B_txt, 32, 512), dtype=torch.bfloat16, device=data_dict['txt_ids'].device)
            rgb_basic_features = torch.empty((B_txt, 32, 512), dtype=torch.bfloat16, device=data_dict['txt_ids'].device)
            with torch.no_grad():
                with autocast("cuda", dtype=torch.bfloat16):
                    for i in range(B_txt):
                        lang_basic_features[i] = self.frozen_model.get_text_features(data_dict['txt_ids'][i], walk_short_pos=True)
                        rgb_basic_features[i] = self.frozen_model.get_image_features(data_dict['images'][i])[1]

                    if getattr(self, "use_scene_cap", False):
                        data_dict['scene_text_embed'] = self.frozen_model.get_text_features(data_dict['scene_txt_ids'], walk_short_pos=False)

            data_dict['inter_view_txt_embed'] = lang_basic_features
            data_dict['inter_view_rgb_embed'] = rgb_basic_features
            data_dict['scene_rgb_embed'] = rgb_basic_features.mean(dim=1)
        elif mode == 'qa':
            # Fold the view dimension into the batch for one encoder pass.
            B, V, C, H, W = data_dict['point_map'].shape
            pm = data_dict['point_map'].reshape(B * V, C, H, W)
            with torch.no_grad():
                with autocast("cuda", dtype=torch.bfloat16):
                    _, pm_feats = self.pm_encoder.get_image_features(pm)
            data_dict['inter_view_pm_embed'] = pm_feats.reshape(B, V, -1)

            tokenized = self.tokenizer.batch_encode_plus(
                data_dict['sentence'],
                padding="max_length",
                return_tensors="pt",
                max_length=256,
            ).to(data_dict['inter_view_pm_embed'].device)

            data_dict['txt_ids'] = tokenized['input_ids']
            with autocast("cuda", dtype=torch.bfloat16):
                # With output_tokens enabled, the text model's last output holds
                # the per-token features.
                data_dict['inter_view_txt_tokens'] = self.text_encoder.text_model(data_dict['txt_ids'])[-1]
            # Invert the HF mask so True marks padding positions.
            data_dict['attention_mask'] = tokenized['attention_mask'].ne(1).bool()

        # Heads are config-dependent; score answers only when a QA head exists.
        if hasattr(self, "qa_head") and self.qa_head is not None:
            answer_scores = self.qa_head(
                data_dict['inter_view_pm_embed'],
                data_dict['inter_view_txt_tokens'],
                data_dict['attention_mask']
            )
            data_dict['answer_scores'] = answer_scores
        return data_dict
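    # Inputs read from `data_dict`, per mode (as used in forward above):
    #   warmup:   images (B, C, H, W), point_map (B, H, W, C), txt_ids
    #   pretrain: images, point_map (B, V, H, W, C), txt_ids,
    #             plus scene_txt_ids when use_scene_cap is enabled
    #   qa:       point_map (B, V, C, H, W), sentence (list of raw strings)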

    def get_vision_params(self, model):
        # Everything left trainable by set_training_mode, i.e. the vision tower.
        return [(n, p) for n, p in model.named_parameters() if p.requires_grad]

    def get_text_params(self, model):
        return [
            (n, p) for n, p in model.named_parameters()
            if "text_model" in n
        ]

    def get_opt_params(self):
        def get_lr(cfg, default_lr):
            lr = cfg.get("lr")
            return default_lr if lr is None else lr

        optimizer_grouped_parameters = []
        if self.cfg.mode in ('warmup', 'pretrain'):
            optimizer_grouped_parameters += no_decay_param_group(
                self.get_vision_params(self.pm_encoder), get_lr(self.cfg.model.vision, self.cfg.solver.lr)
            )
        else:
            # Note: the text-encoder group reuses the vision learning-rate setting.
            optimizer_grouped_parameters += no_decay_param_group(
                self.get_text_params(self.text_encoder), get_lr(self.cfg.model.vision, self.cfg.solver.lr)
            )
        if "qa_head" in self.head_list:
            optimizer_grouped_parameters += no_decay_param_group(
                self.qa_head.named_parameters(), get_lr(self.cfg.model.heads.qa_head, self.cfg.solver.lr)
            )
        if "ground_head" in self.head_list:
            optimizer_grouped_parameters += no_decay_param_group(
                self.ground_head.named_parameters(), get_lr(self.cfg.model.heads.ground_head, self.cfg.solver.lr)
            )

        return optimizer_grouped_parameters
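

# A minimal sketch of wiring the grouped parameters into an optimizer.
# AdamW is an assumption here; the actual choice belongs to the solver config:
#
#   model = OpenVocab(cfg)
#   optimizer = torch.optim.AdamW(model.get_opt_params(), lr=cfg.solver.lr)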