JPShi committed
Commit 23777f9 · verified · 1 Parent(s): 6b89cc6

Add files using upload-large-folder tool

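The files added in this commit form a `transformers` remote-code checkpoint (modeling, configuration, and tokenizer code plus weights). Below is a minimal, hedged loading sketch; the repository id "path/to/sa2va-checkpoint" is a placeholder rather than the actual repo name, a CUDA device is assumed, and it relies on the checkpoint's config.json registering Sa2VAChatModel via auto_map.

import torch
from transformers import AutoModel, AutoTokenizer

path = "path/to/sa2va-checkpoint"  # placeholder: local folder or Hub repo id
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,  # loads Sa2VAChatModel from modeling_sa2va_chat.py
).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)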
.gitattributes CHANGED
@@ -33,4 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
- assets/model.jpg filter=lfs diff=lfs merge=lfs -text

modeling_sa2va_chat.py ADDED
@@ -0,0 +1,973 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import warnings
8
+ from typing import Any, List, Optional, Tuple, Union
9
+
10
+ import torchvision.transforms as T
11
+ from torchvision.transforms.functional import InterpolationMode
12
+
13
+ import torch.utils.checkpoint
14
+ import transformers
15
+
16
+ from .modeling_internlm2 import InternLM2ForCausalLM
17
+ from .modeling_phi3 import Phi3ForCausalLM
18
+ from peft import LoraConfig, get_peft_model
19
+ from torch import nn
20
+ from torch.nn import CrossEntropyLoss
21
+ from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
22
+ LlamaTokenizer, Qwen2ForCausalLM)
23
+ from transformers.modeling_outputs import CausalLMOutputWithPast
24
+ from transformers.modeling_utils import PreTrainedModel
25
+ from transformers.utils import ModelOutput, logging
26
+ from transformers import StoppingCriteriaList, StoppingCriteria
27
+
28
+ from .configuration_sa2va_chat import Sa2VAChatConfig
29
+ from .modeling_intern_vit import InternVisionModel, has_flash_attn
30
+
31
+ from .sam2 import SAM2
32
+ from .templates import PROMPT_TEMPLATE
33
+
34
+ import numpy as np
35
+ from torchvision.transforms.functional import resize, to_pil_image
36
+
37
+ from types import MethodType
38
+ import torch.nn.functional as F
39
+
40
+ try:
41
+ from .flash_attention import FlashAttention
42
+ has_flash_attn = True
43
+ except ImportError:
44
+ print('FlashAttention is not installed.')
45
+ has_flash_attn = False
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ def version_cmp(v1, v2, op='eq'):
50
+ import operator
51
+
52
+ from packaging import version
53
+ op_func = getattr(operator, op)
54
+ return op_func(version.parse(v1), version.parse(v2))
55
+
56
+ class StopWordStoppingCriteria(StoppingCriteria):
57
+ """StopWord stopping criteria."""
58
+
59
+ def __init__(self, tokenizer, stop_word):
60
+ self.tokenizer = tokenizer
61
+ self.stop_word = stop_word
62
+ self.length = len(self.stop_word)
63
+
64
+ def __call__(self, input_ids, *args, **kwargs) -> bool:
65
+ cur_text = self.tokenizer.decode(input_ids[0])
66
+ cur_text = cur_text.replace('\r', '').replace('\n', '')
67
+ return cur_text[-self.length:] == self.stop_word
68
+
69
+ def get_stop_criteria(
70
+ tokenizer,
71
+ stop_words=[],
72
+ ):
73
+ stop_criteria = StoppingCriteriaList()
74
+ for word in stop_words:
75
+ stop_criteria.append(StopWordStoppingCriteria(tokenizer, word))
76
+ return stop_criteria
77
+
78
+ class DirectResize:
79
+ def __init__(self, target_length: int) -> None:
80
+ self.target_length = target_length
81
+
82
+ def apply_image(self, image: np.ndarray) -> np.ndarray:
83
+ """
84
+ Expects a numpy array with shape HxWxC in uint8 format.
85
+ """
86
+ img = to_pil_image(image, mode='RGB')
87
+ return np.array(img.resize((self.target_length, self.target_length)))
88
+
89
+ class Sa2VAChatModel(PreTrainedModel):
90
+ config_class = Sa2VAChatConfig
91
+ main_input_name = 'pixel_values'
92
+ base_model_prefix = 'language_model'
93
+ _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'InternLM2DecoderLayer',
94
+ 'Phi3DecoderLayer', 'Qwen2DecoderLayer', 'SAM2']
95
+ _supports_flash_attn_2 = True
96
+ supports_gradient_checkpointing = True
97
+
98
+ def __init__(self, config: Sa2VAChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
99
+ super().__init__(config)
100
+
101
+ assert version_cmp(transformers.__version__, '4.37.0', 'ge')
102
+ image_size = config.force_image_size or config.vision_config.image_size
103
+ patch_size = config.vision_config.patch_size
104
+ self.patch_size = patch_size
105
+ self.select_layer = config.select_layer
106
+ self.template = config.template
107
+ self.template = self.template.replace('-', '_')
108
+ self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
109
+ self.downsample_ratio = config.downsample_ratio
110
+ self.ps_version = config.ps_version
111
+ self.llm_arch_name = config.llm_config.architectures[0]
112
+
113
+ self.fast_pool_size = 4
114
+ self.fast_pool = nn.AdaptiveAvgPool2d((self.fast_pool_size, self.fast_pool_size))
115
+
116
+ use_flash_attn = use_flash_attn if has_flash_attn else False
117
+ config.vision_config.use_flash_attn = True if use_flash_attn else False
118
+ config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
119
+
120
+ logger.info(f'num_image_token: {self.num_image_token}')
121
+ logger.info(f'ps_version: {self.ps_version}')
122
+ if vision_model is not None:
123
+ self.vision_model = vision_model
124
+ else:
125
+ self.vision_model = InternVisionModel(config.vision_config)
126
+ if language_model is not None:
127
+ self.language_model = language_model
128
+ else:
129
+ if config.llm_config.architectures[0] == 'LlamaForCausalLM':
130
+ self.language_model = LlamaForCausalLM(config.llm_config)
131
+ elif config.llm_config.architectures[0] == 'InternLM2ForCausalLM':
132
+ self.language_model = InternLM2ForCausalLM(config.llm_config)
133
+ elif config.llm_config.architectures[0] == 'Phi3ForCausalLM':
134
+ self.language_model = Phi3ForCausalLM(config.llm_config)
135
+ elif config.llm_config.architectures[0] == 'Qwen2ForCausalLM':
136
+ self.language_model = Qwen2ForCausalLM(config.llm_config)
137
+ else:
138
+ raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
139
+
140
+ vit_hidden_size = config.vision_config.hidden_size
141
+ llm_hidden_size = config.llm_config.hidden_size
142
+
143
+ self.mlp1 = nn.Sequential(
144
+ nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
145
+ nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
146
+ nn.GELU(),
147
+ nn.Linear(llm_hidden_size, llm_hidden_size)
148
+ )
149
+
150
+ self.img_context_token_id = None
151
+ self.conv_template = PROMPT_TEMPLATE[self.template]
152
+ self.template = self.conv_template
153
+ if hasattr(config, 'system_message'):
154
+ self.system_message = config.system_message
155
+ self.num_samples = 0
156
+
157
+ if config.use_backbone_lora:
158
+ self.wrap_backbone_lora(r=config.use_backbone_lora, lora_alpha=2 * config.use_backbone_lora)
159
+
160
+ if config.use_llm_lora:
161
+ self.wrap_llm_lora(r=config.use_llm_lora, lora_alpha=2 * config.use_llm_lora)
162
+
163
+ self.grounding_encoder = SAM2()
164
+ out_dim = self.grounding_encoder.hidden_dim
165
+ in_dim = llm_hidden_size
166
+ self.text_hidden_fcs = nn.Sequential(
167
+ nn.Linear(in_dim, in_dim), nn.ReLU(inplace=True),
168
+ nn.Linear(in_dim, out_dim), nn.Dropout(0.0)
169
+ )
170
+
171
+ self.init_prediction_config = False
172
+
173
+ def wrap_backbone_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
174
+ lora_config = LoraConfig(
175
+ r=r,
176
+ target_modules=['attn.qkv', 'attn.proj', 'mlp.fc1', 'mlp.fc2'],
177
+ lora_alpha=lora_alpha,
178
+ lora_dropout=lora_dropout,
179
+ )
180
+ self.vision_model = get_peft_model(self.vision_model, lora_config)
181
+ self.vision_model.print_trainable_parameters()
182
+
183
+ def wrap_llm_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
184
+ # Determine the target modules based on the architecture of the language model
185
+ if self.llm_arch_name == 'InternLM2ForCausalLM':
186
+ target_modules = ['attention.wqkv', 'attention.wo', 'feed_forward.w1', 'feed_forward.w2', 'feed_forward.w3']
187
+ elif self.llm_arch_name == 'Phi3ForCausalLM':
188
+ target_modules = ['mlp.down_proj', 'mlp.gate_up_proj', 'self_attn.o_proj', 'self_attn.qkv_proj']
189
+ elif self.llm_arch_name in ['Qwen2ForCausalLM', 'LlamaForCausalLM']:
190
+ target_modules = ['self_attn.q_proj', 'self_attn.k_proj', 'self_attn.v_proj', 'self_attn.o_proj',
191
+ 'mlp.gate_proj', 'mlp.down_proj', 'mlp.up_proj']
192
+ else:
193
+ raise NotImplementedError(f'{self.llm_arch_name} is not implemented for LLM LoRA.')
194
+ lora_config = LoraConfig(
195
+ r=r,
196
+ target_modules=target_modules,
197
+ lora_alpha=lora_alpha,
198
+ lora_dropout=lora_dropout,
199
+ task_type='CAUSAL_LM'
200
+ )
201
+ self.language_model = get_peft_model(self.language_model, lora_config)
202
+ self.language_model.enable_input_require_grads()
203
+ self.language_model.print_trainable_parameters()
204
+
205
+ def pixel_shuffle(self, x, scale_factor=0.5):
206
+ n, w, h, c = x.size()
207
+ # N, W, H, C --> N, W, H * scale, C // scale
208
+ x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
209
+ # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
210
+ x = x.permute(0, 2, 1, 3).contiguous()
211
+ # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
212
+ x = x.view(n, int(h * scale_factor), int(w * scale_factor),
213
+ int(c / (scale_factor * scale_factor)))
214
+ if self.ps_version == 'v1':
215
+ warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
216
+ 'which results in a transposed image.')
217
+ else:
218
+ x = x.permute(0, 2, 1, 3).contiguous()
219
+ return x
220
+
221
+ def extract_feature(self, pixel_values):
222
+ if self.select_layer == -1:
223
+ vit_embeds = self.vision_model(
224
+ pixel_values=pixel_values,
225
+ output_hidden_states=False,
226
+ return_dict=True).last_hidden_state
227
+ else:
228
+ vit_embeds = self.vision_model(
229
+ pixel_values=pixel_values,
230
+ output_hidden_states=True,
231
+ return_dict=True).hidden_states[self.select_layer]
232
+ vit_embeds = vit_embeds[:, 1:, :]
233
+
234
+ h = w = int(vit_embeds.shape[1] ** 0.5)
235
+ vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
236
+ vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
237
+ vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
238
+ vit_embeds = self.mlp1(vit_embeds)
239
+ return vit_embeds
240
+
241
+ @property
242
+ def lm_head(self):
243
+ return self.language_model.get_output_embeddings()
244
+
245
+ def get_input_embeddings(self):
246
+ return self.language_model.get_input_embeddings()
247
+
248
+ def get_output_embeddings(self):
249
+ return self.language_model.get_output_embeddings()
250
+
251
+ def forward(self, data, data_samples=None, mode='loss'):
252
+ pixel_values = data['pixel_values']
253
+
254
+ if type(pixel_values) is list or pixel_values.ndim == 5:
255
+ if type(pixel_values) is list:
256
+ pixel_values = [
257
+ x.unsqueeze(0) if x.ndim == 3 else x for x in pixel_values
258
+ ]
259
+ # b*n, c, h, w
260
+ concat_images = torch.cat(
261
+ [image.to(self.vision_model.dtype) for image in pixel_values], dim=0)
262
+ else:
263
+ raise NotImplementedError()
264
+
265
+ input_ids = data['input_ids']
266
+ position_ids = data['position_ids']
267
+ attention_mask = data['attention_mask']
268
+ # images whose pixel values sum to 0 are placeholders for text-only samples
269
+ image_flags = torch.sum(concat_images, dim=(1, 2, 3)) != 0
270
+ image_flags = image_flags.long()
271
+
272
+ labels = data['labels']
273
+ use_cache = False
274
+
275
+ if 'vp_overall_mask' not in data.keys():
276
+ vp_overall_mask = None
277
+ else:
278
+ vp_overall_mask = data['vp_overall_mask']
279
+
280
+ if 'prompt_masks' in data.keys():
281
+ prompt_masks = data['prompt_masks']
282
+ else:
283
+ prompt_masks = None
284
+
285
+ outputs = self._llm_forward(
286
+ input_ids=input_ids,
287
+ position_ids=position_ids,
288
+ attention_mask=attention_mask,
289
+ image_flags=image_flags,
290
+ pixel_values=concat_images,
291
+ labels=labels,
292
+ use_cache=use_cache,
293
+ output_hidden_states=True,
294
+ vp_overall_mask=vp_overall_mask,
295
+ prompt_masks=prompt_masks,
296
+ )
297
+
298
+ return outputs
299
+
300
+ def _llm_forward(
301
+ self,
302
+ pixel_values: torch.FloatTensor,
303
+ input_ids: torch.LongTensor = None,
304
+ attention_mask: Optional[torch.Tensor] = None,
305
+ position_ids: Optional[torch.LongTensor] = None,
306
+ image_flags: Optional[torch.LongTensor] = None,
307
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
308
+ labels: Optional[torch.LongTensor] = None,
309
+ use_cache: Optional[bool] = None,
310
+ output_attentions: Optional[bool] = None,
311
+ output_hidden_states: Optional[bool] = None,
312
+ return_dict: Optional[bool] = None,
313
+ vp_overall_mask=None,
314
+ prompt_masks=None,
315
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
316
+ return_dict = return_dict if return_dict is not None \
317
+ else self.config.use_return_dict
318
+
319
+ image_flags = image_flags.squeeze(-1)
320
+ # Clone so the in-place image-token assignment below does not write into the embedding weights.
321
+ input_embeds = self.language_model.get_input_embeddings()(
322
+ input_ids).clone()
323
+
324
+ vit_embeds = self.extract_feature(pixel_values)
325
+ vit_embeds = vit_embeds.to(input_embeds.dtype) # FIXME: why vit_embeds is float16?
326
+ fast_vit_embeds = None
327
+
328
+ vit_embeds = vit_embeds[image_flags == 1]
329
+ vit_batch_size = pixel_values.shape[0]
330
+
331
+ B, N, C = input_embeds.shape
332
+ input_embeds = input_embeds.reshape(B * N, C)
333
+
334
+ self._count = getattr(self, '_count', 0) + 1  # forward-call counter, lazily initialized
335
+
336
+ if vp_overall_mask is not None and prompt_masks is not None:
337
+ vp_embeds = []
338
+ vp_overall_mask = vp_overall_mask.to(vit_embeds.device).bool()
339
+ prompt_masks = [item.to(vit_embeds.device).bool() for item in prompt_masks]
340
+
341
+ vp_overall_mask = vp_overall_mask[image_flags == 1]
342
+ overall_tile_vit_embeds = vit_embeds[vp_overall_mask] # (n_img, hw, c)
343
+
344
+ i_vp_img = 0
345
+ for i_img in range(len(vit_embeds)):
346
+ vp_embeds.append(vit_embeds[i_img].reshape(-1, C))
347
+ if vp_overall_mask[i_img]:
348
+ tile_vit_embeds = overall_tile_vit_embeds[i_vp_img].reshape(-1, C) # (hw, C)
349
+ objects_prompt_masks = prompt_masks[i_vp_img]
350
+ n_obj = len(objects_prompt_masks)
351
+ tile_vit_embeds = tile_vit_embeds.unsqueeze(0).repeat(n_obj, 1, 1)
352
+ objects_prompt_masks = objects_prompt_masks.reshape(n_obj, -1)
353
+ vp_embeds.append(tile_vit_embeds[objects_prompt_masks])
354
+ i_vp_img += 1
355
+ vp_embeds = torch.cat(vp_embeds, dim=0)
356
+ else:
357
+ vp_embeds = None
358
+
359
+ input_ids = input_ids.reshape(B * N)
360
+ selected = (input_ids == self.img_context_token_id)
361
+
362
+ if vp_embeds is None:
363
+ try:
364
+ input_embeds[selected] = vit_embeds.reshape(-1, C)
365
+ except Exception as e:
366
+ vit_embeds = vit_embeds.reshape(-1, C)
367
+ print(f'warning: {e}, input_embeds[selected].shape='
368
+ f'{input_embeds[selected].shape}, '
369
+ f'vit_embeds.shape={vit_embeds.shape}')
370
+ n_token = selected.sum()
371
+ if n_token > len(vit_embeds):
372
+ print(f"Wrong !!! {n_token} image tokens in text but only {len(vit_embeds)} vit embeds !!!")
373
+ expand_ratio = n_token // len(vit_embeds) + 1
374
+ vit_embeds = torch.cat([vit_embeds] * expand_ratio, dim=0)
375
+
376
+ input_embeds[selected] = vit_embeds[:n_token]
377
+ else:
378
+ try:
379
+ input_embeds[selected] = vp_embeds.reshape(-1, C)
380
+ except Exception as e:
381
+ vp_embeds = vp_embeds.reshape(-1, C)
382
+ print(f'warning: {e}, input_embeds[selected].shape='
383
+ f'{input_embeds[selected].shape}, '
384
+ f'vp_embeds.shape={vp_embeds.shape}')
385
+ n_token = selected.sum()
386
+ if n_token > len(vp_embeds):
387
+ print(f"Wrong !!! {n_token} image tokens in text but only {len(vp_embeds)} vit embeds !!!")
388
+ expand_ratio = n_token // len(vp_embeds) + 1
389
+ vp_embeds = torch.cat([vp_embeds] * expand_ratio, dim=0)
390
+
391
+ input_embeds[selected] = vp_embeds[:n_token]
392
+
393
+ input_embeds = input_embeds.reshape(B, N, C)
394
+
395
+ outputs = self.language_model(
396
+ inputs_embeds=input_embeds,
397
+ attention_mask=attention_mask,
398
+ position_ids=position_ids,
399
+ past_key_values=past_key_values,
400
+ use_cache=use_cache,
401
+ output_attentions=output_attentions,
402
+ output_hidden_states=output_hidden_states,
403
+ return_dict=return_dict,
404
+ )
405
+ logits = outputs.logits
406
+
407
+ loss = None
408
+ if labels is not None:
409
+ # Shift so that tokens < n predict n
410
+ shift_logits = logits[..., :-1, :].contiguous()
411
+ shift_labels = labels[..., 1:].contiguous()
412
+ # Flatten the tokens
413
+ loss_fct = CrossEntropyLoss()
414
+ shift_logits = shift_logits.view(
415
+ -1, self.language_model.config.vocab_size)
416
+ shift_labels = shift_labels.view(-1)
417
+ # Enable model parallelism
418
+ shift_labels = shift_labels.to(shift_logits.device)
419
+ loss = loss_fct(shift_logits, shift_labels)
420
+
421
+ if not return_dict:
422
+ output = (logits,) + outputs[1:]
423
+ return (loss,) + output if loss is not None else output
424
+
425
+ return CausalLMOutputWithPast(
426
+ loss=loss,
427
+ logits=logits,
428
+ past_key_values=outputs.past_key_values,
429
+ hidden_states=outputs.hidden_states,
430
+ attentions=outputs.attentions,
431
+ )
432
+
433
+ @torch.no_grad()
434
+ def generate(
435
+ self,
436
+ pixel_values: Optional[torch.FloatTensor] = None,
437
+ input_ids: Optional[torch.FloatTensor] = None,
438
+ attention_mask: Optional[torch.LongTensor] = None,
439
+ visual_features: Optional[torch.FloatTensor] = None,
440
+ generation_config: Optional[GenerationConfig] = None,
441
+ output_hidden_states: Optional[bool] = None,
442
+ return_dict: Optional[bool] = None,
443
+ fast_token_idx=None,
444
+ fast_pixel_values=None,
445
+ prompt_masks=None,
446
+ vp_overall_mask=None,
447
+ **generate_kwargs,
448
+ ) -> torch.LongTensor:
449
+ device = self.device
450
+ assert self.img_context_token_id is not None
451
+
452
+ if fast_pixel_values is not None:
453
+ assert fast_token_idx is not None
454
+ if type(fast_pixel_values) is list or fast_pixel_values.ndim == 5:
455
+ if type(fast_pixel_values) is list:
456
+ fast_pixel_values = [
457
+ x.unsqueeze(0) if x.ndim == 3 else x for x in fast_pixel_values
458
+ ]
459
+ # b*n, c, h, w
460
+ fast_pixel_values = torch.cat(
461
+ [image.to(self.vision_model.dtype) for image in fast_pixel_values], dim=0)
462
+
463
+ if pixel_values is not None:
464
+ if visual_features is not None:
465
+ vit_embeds = visual_features
466
+ else:
467
+ if type(pixel_values) is list or pixel_values.ndim == 5:
468
+ if type(pixel_values) is list:
469
+ pixel_values = [
470
+ x.unsqueeze(0) if x.ndim == 3 else x for x in pixel_values
471
+ ]
472
+ # b*n, c, h, w
473
+ pixel_values = torch.cat(
474
+ [image.to(self.vision_model.dtype) for image in pixel_values], dim=0)
475
+
476
+ if fast_pixel_values is not None:
477
+ n_fast_images = fast_pixel_values.shape[0]
478
+ whole_pixel_values = torch.cat([fast_pixel_values, pixel_values], dim=0)
479
+ vit_embeds = self.extract_feature(whole_pixel_values.to(device))
480
+ # vit_embeds = vit_embeds.to(input_embeds.dtype) # FIXME: why vit_embeds is float16?
481
+ fast_vit_embeds = vit_embeds[:n_fast_images] # (n_fast_images, hw, c)
482
+ _size = int(fast_vit_embeds.shape[1] ** 0.5)
483
+ fast_vit_embeds = fast_vit_embeds.reshape(fast_vit_embeds.shape[0], _size, _size,
484
+ fast_vit_embeds.shape[-1])
485
+ # pooling
486
+ fast_vit_embeds = fast_vit_embeds.permute(0, 3, 1, 2) # (n_fast_images, c, h, w)
487
+ fast_vit_embeds = self.fast_pool(fast_vit_embeds).flatten(2) # (n_fast_images, c, hw)
488
+ fast_vit_embeds = fast_vit_embeds.permute(0, 2, 1)
489
+ vit_embeds = vit_embeds[n_fast_images:]
490
+ else:
491
+ fast_vit_embeds = None
492
+ vit_embeds = self.extract_feature(pixel_values.to(device))
493
+ image_flags = torch.sum(pixel_values, dim=(1, 2, 3)) != 0
494
+ image_flags = image_flags.long()
495
+ vit_embeds = vit_embeds[image_flags == 1]
496
+
497
+ input_embeds = self.language_model.get_input_embeddings()(input_ids.to(device))
498
+ B, N, C = input_embeds.shape
499
+ input_embeds = input_embeds.reshape(B * N, C)
500
+
501
+ if vp_overall_mask is not None and prompt_masks is not None:
502
+ vp_embeds = []
503
+ vp_overall_mask = vp_overall_mask.to(vit_embeds.device).bool()
504
+ prompt_masks = [item.to(vit_embeds.device).bool() for item in prompt_masks]
505
+
506
+ vp_overall_mask = vp_overall_mask[image_flags == 1]
507
+ overall_tile_vit_embeds = vit_embeds[vp_overall_mask] # (n_img, hw, c)
508
+
509
+ i_vp_img = 0
510
+ for i_img in range(len(vit_embeds)):
511
+ vp_embeds.append(vit_embeds[i_img].reshape(-1, C))
512
+ if vp_overall_mask[i_img]:
513
+ tile_vit_embeds = overall_tile_vit_embeds[i_vp_img].reshape(-1, C) # (hw, C)
514
+ objects_prompt_masks = prompt_masks[i_vp_img]
515
+ n_obj = len(objects_prompt_masks)
516
+ tile_vit_embeds = tile_vit_embeds.unsqueeze(0).repeat(n_obj, 1, 1)
517
+ objects_prompt_masks = objects_prompt_masks.reshape(n_obj, -1)
518
+ vp_embeds.append(tile_vit_embeds[objects_prompt_masks])
519
+ i_vp_img += 1
520
+
521
+ vp_embeds = torch.cat(vp_embeds, dim=0)
522
+ else:
523
+ vp_embeds = None
524
+
525
+ input_ids = input_ids.reshape(B * N)
526
+ selected = (input_ids == self.img_context_token_id)
527
+ assert selected.sum() != 0
528
+ if vp_embeds is None:
529
+ input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
530
+ else:
531
+ if len(input_embeds[selected]) != len(vp_embeds.reshape(-1, C)):
532
+ print("Shape mismatch, selected is {}, vp embeds is {} !!!" \
533
+ .format(len(input_embeds[selected]), len(vp_embeds.reshape(-1, C))))
534
+ min_tokens = min(len(input_embeds[selected]), len(vp_embeds.reshape(-1, C)))
535
+ input_embeds[selected][:min_tokens] = vp_embeds.reshape(-1, C)[:min_tokens].to(input_embeds.device)
536
+ else:
537
+ input_embeds[selected] = vp_embeds.reshape(-1, C).to(input_embeds.device)
538
+
539
+ if fast_vit_embeds is not None:
540
+ selected = (input_ids == fast_token_idx)
541
+ # FIXME, add repeat.
542
+ assert selected.sum() != 0
543
+ input_embeds[selected] = fast_vit_embeds.reshape(-1, C).to(input_embeds.device)
544
+
545
+ input_embeds = input_embeds.reshape(B, N, C)
546
+ else:
547
+ input_embeds = self.language_model.get_input_embeddings()(input_ids)
548
+
549
+ outputs = self.language_model.generate(
550
+ inputs_embeds=input_embeds,
551
+ attention_mask=attention_mask.to(device),
552
+ generation_config=generation_config,
553
+ output_hidden_states=output_hidden_states,
554
+ # return_dict=return_dict,
555
+ use_cache=True,
556
+ **generate_kwargs,
557
+ )
558
+
559
+ return outputs
560
+
561
+ def preparing_for_generation(self, tokenizer, max_new_tokens=2048, torch_dtype=torch.bfloat16):
562
+ # set stop criteria and generation configs for model
563
+ if not hasattr(self, 'tokenizer'):
564
+ self.tokenizer = tokenizer
565
+ self.bot_name = 'BOT'
566
+ stop_words = []
567
+ stop_words += self.template.get('STOP_WORDS', [])
568
+ stop_criteria = get_stop_criteria(
569
+ tokenizer=self.tokenizer, stop_words=stop_words)
570
+ self.stop_criteria = stop_criteria
571
+
572
+ default_generation_kwargs = dict(
573
+ max_new_tokens=max_new_tokens,
574
+ do_sample=False,
575
+ eos_token_id=self.tokenizer.eos_token_id,
576
+ pad_token_id=(
577
+ self.tokenizer.pad_token_id
578
+ if self.tokenizer.pad_token_id is not None
579
+ else self.tokenizer.eos_token_id
580
+ ),
581
+ )
582
+
583
+ self.gen_config = GenerationConfig(**default_generation_kwargs)
584
+ self.init_prediction_config = True
585
+ self.torch_dtype = torch_dtype
586
+ self.to(torch_dtype)
587
+ self.extra_image_processor = DirectResize(target_length=1024, )
588
+ # for multi image process
589
+ self.min_dynamic_patch = 1
590
+ self.max_dynamic_patch = 12
591
+ self.downsample_ratio = 0.5
592
+ self.image_size = 448
593
+ self.use_thumbnail = True
594
+ patch_size = 14
595
+ self.patch_size = patch_size
596
+
597
+ self.patch_token = int((self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2))
598
+ self.IMAGENET_MEAN = (0.485, 0.456, 0.406)
599
+ self.IMAGENET_STD = (0.229, 0.224, 0.225)
600
+ self.IMG_CONTEXT_TOKEN = '<IMG_CONTEXT>'
601
+ self.IMG_START_TOKEN = '<img>'
602
+ self.IMG_END_TOKEN = '</img>'
603
+ self.FAST_IMG_CONTEXT_TOKEN = '<FAST_IMG_CONTEXT>'
604
+ self.FAST_IMG_START_TOKEN = '<fast_img>'
605
+ self.FAST_IMG_END_TOKEN = '</fast_img>'
606
+
607
+ self.transformer = T.Compose([
608
+ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
609
+ T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC),
610
+ T.ToTensor(),
611
+ T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD)
612
+ ])
613
+ self.VP_START_TOKEN = '<vp>'
614
+ self.VP_END_TOKEN = '</vp>'
615
+
616
+ # replace the Phi3 prepare_inputs_for_generation function
617
+ if self.config.llm_config.architectures[0] == 'Phi3ForCausalLM':
618
+ self.language_model.prepare_inputs_for_generation = MethodType(prepare_inputs_for_generation_phi3, self.language_model)
619
+
620
+ img_context_token_id = tokenizer.convert_tokens_to_ids('<IMG_CONTEXT>')
621
+ self.img_context_token_id = img_context_token_id
622
+ self.seg_token_idx = tokenizer.convert_tokens_to_ids('[SEG]')
623
+ self.fast_token_idx = tokenizer.convert_tokens_to_ids('<FAST_IMG_CONTEXT>')
624
+ return
625
+
626
+ def predict_forward(
627
+ self,
628
+ image=None,
629
+ video=None,
630
+ all_video=None,
631
+ fast_video=None,
632
+ text=None,
633
+ past_text='',
634
+ mask_prompts=None,
635
+ tokenizer=None,
636
+ temporal_msg=None,
637
+ fast_temporal_msg=None,
638
+ timestamp_list=None
639
+ ):
640
+ if not self.init_prediction_config:
641
+ assert tokenizer
642
+ self.preparing_for_generation(tokenizer=tokenizer)
643
+
644
+ if image is None and video is None and '<image>' not in past_text:
645
+ text = text.replace('<image>', "")
646
+ input_text = ''
647
+ input_text += self.template['INSTRUCTION'].format(
648
+ input=text, round=1, bot_name=self.bot_name)
649
+ input_text = past_text + input_text
650
+ ids = self.tokenizer.encode(input_text)
651
+ ids = torch.tensor(ids).cuda().unsqueeze(0)
652
+
653
+ attention_mask = torch.ones_like(ids, dtype=torch.bool)
654
+
655
+ mm_inputs = {
656
+ 'pixel_values': None,
657
+ 'input_ids': ids,
658
+ 'attention_mask': attention_mask,
659
+ 'position_ids': None,
660
+ 'past_key_values': None,
661
+ 'labels': None,
662
+ 'prompt_masks': None,
663
+ 'vp_overall_mask': None,
664
+ }
665
+ ret_masks = []
666
+ else:
667
+ input_dict = {}
668
+ fast_pixel_values = None
669
+ if video is not None:
670
+ pixel_values = []
671
+ fast_pixel_values = []
672
+ extra_pixel_values = []
673
+ ori_image_size = video[0].size
674
+
675
+ if all_video is None:
676
+ all_video = video
677
+ for frame_idx, frame_image in enumerate(all_video):
678
+ assert ori_image_size == frame_image.size
679
+ g_image = np.array(frame_image) # for grounding
680
+ g_image = self.extra_image_processor.apply_image(g_image)
681
+ g_image = torch.from_numpy(g_image).permute(2, 0, 1).contiguous()
682
+ extra_pixel_values.append(g_image)
683
+
684
+ #for frame_idx in np.linspace(0, len(video)-1, 5, dtype=int):
685
+ for frame_idx in np.linspace(0, len(video)-1, min(5, len(video)), dtype=int):
686
+ frame_image = video[frame_idx]
687
+ img = self.transformer(frame_image)
688
+ pixel_values.append(img)
689
+
690
+ for frame_idx in np.linspace(0, len(fast_video)-1, min(128, len(fast_video)), dtype=int):
691
+ frame_image = fast_video[frame_idx]
692
+ img = self.transformer(frame_image)
693
+ fast_pixel_values.append(img)
694
+
695
+ pixel_values = torch.stack(pixel_values, dim=0).to(self.torch_dtype) # (n_f, 3, h, w)
696
+ fast_pixel_values = torch.stack(fast_pixel_values, dim=0).to(self.torch_dtype)
697
+ g_pixel_values = torch.stack([
698
+ self.grounding_encoder.preprocess_image(pixel) for pixel in extra_pixel_values
699
+ ]).to(self.torch_dtype)
700
+ num_image_tokens = self.patch_token
701
+ num_frames = len(pixel_values)
702
+ num_frames_fast = len(fast_pixel_values)
703
+
704
+ input_dict['vp_overall_mask'] = None
705
+ else:
706
+ ori_image_size = image.size
707
+
708
+ # prepare grounding images
709
+ g_image = np.array(image) # for grounding
710
+ g_image = self.extra_image_processor.apply_image(g_image)
711
+ g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous().to(self.torch_dtype)
712
+ extra_pixel_values = [g_pixel_values]
713
+ g_pixel_values = torch.stack([
714
+ self.grounding_encoder.preprocess_image(pixel) for pixel in extra_pixel_values
715
+ ]).to(self.torch_dtype)
716
+
717
+ images = dynamic_preprocess(image, self.min_dynamic_patch,
718
+ self.max_dynamic_patch,
719
+ self.image_size, self.use_thumbnail)
720
+
721
+ if mask_prompts is not None:
722
+ vp_overall_mask = torch.Tensor([False] * (len(images) - 1) + [True])
723
+ input_dict['vp_overall_mask'] = vp_overall_mask
724
+ else:
725
+ input_dict['vp_overall_mask'] = None
726
+
727
+ pixel_values = [self.transformer(image) for image in images]
728
+ pixel_values = torch.stack(pixel_values).to(self.torch_dtype)
729
+ num_image_tokens = pixel_values.shape[0] * self.patch_token
730
+ num_frames = 1
731
+ num_frames_fast = 0
732
+ fast_pixel_values = None
733
+ input_dict['g_pixel_values'] = g_pixel_values
734
+ input_dict['pixel_values'] = pixel_values
735
+ input_dict['fast_pixel_values'] = fast_pixel_values
736
+
737
+ if mask_prompts is not None:
738
+ # reshape mask prompts to feature size
739
+ mask_prompts = [torch.Tensor(item).to(pixel_values.device) for item in mask_prompts]
740
+ mask_prompts = [F.interpolate(
741
+ item.unsqueeze(0),
742
+ size=(int(self.image_size // self.patch_size * self.downsample_ratio),
743
+ int(self.image_size // self.patch_size * self.downsample_ratio)),
744
+ mode='nearest').squeeze(0) for item in mask_prompts]
745
+ region_pixels = []
746
+ for mask_prompt in mask_prompts[0]:
747
+ region_pixels.append(mask_prompt.bool().to(torch.int64).sum())
748
+
749
+ vp_token_str = '\nThere are {} part regions in the picture: '.format(len(mask_prompts[0]))
750
+ for i in range(len(mask_prompts[0])):
751
+ vp_token_str = vp_token_str + \
752
+ f"region{i + 1}" + self.VP_START_TOKEN + \
753
+ self.IMG_CONTEXT_TOKEN * region_pixels[i] + \
754
+ self.VP_END_TOKEN
755
+ if i == len(mask_prompts[0]) - 1:
756
+ vp_token_str = vp_token_str + '.\n'
757
+ else:
758
+ vp_token_str = vp_token_str + ', '
759
+ else:
760
+ vp_token_str = ''
761
+
762
+ if fast_pixel_values is None:
763
+ image_token_str = f'{self.IMG_START_TOKEN}' \
764
+ f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \
765
+ f'{self.IMG_END_TOKEN}'
766
+ frame_tokens = image_token_str * num_frames
767
+ else:
768
+ fast_frame_token_str = f'{self.FAST_IMG_START_TOKEN}' \
769
+ f'{self.FAST_IMG_CONTEXT_TOKEN * self.fast_pool_size * self.fast_pool_size}' \
770
+ f'{self.FAST_IMG_END_TOKEN}'
771
+
772
+ image_token_str = f'{self.IMG_START_TOKEN}' \
773
+ f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \
774
+ f'{self.IMG_END_TOKEN}'
775
+
776
+ frame_tokens = ''
777
+ frame_tokens += fast_temporal_msg
778
+ for timestamp in timestamp_list:
779
+ frame_tokens += (fast_frame_token_str+timestamp)
780
+ frame_tokens += temporal_msg
781
+ frame_tokens += image_token_str * num_frames
782
+
783
+ ret_masks = []
784
+
785
+ if '<image>' in text or mask_prompts is not None:
786
+ assert past_text is None or len(past_text) == 0
787
+ text = text.replace('<image>', frame_tokens + vp_token_str)
788
+ input_text = ''
789
+ input_text += self.template['INSTRUCTION'].format(
790
+ input=text, round=1, bot_name=self.bot_name)
791
+ input_text = past_text + input_text
792
+ #print('input_text', input_text)
793
+ ids = self.tokenizer.encode(input_text)
794
+ ids = torch.tensor(ids).cuda().unsqueeze(0)
795
+
796
+ attention_mask = torch.ones_like(ids, dtype=torch.bool)
797
+
798
+ mm_inputs = {
799
+ 'pixel_values': input_dict['pixel_values'],
800
+ 'input_ids': ids,
801
+ 'attention_mask': attention_mask,
802
+ 'position_ids': None,
803
+ 'past_key_values': None,
804
+ 'labels': None,
805
+ 'prompt_masks': mask_prompts,
806
+ 'vp_overall_mask': input_dict['vp_overall_mask'],
807
+ 'fast_pixel_values': input_dict['fast_pixel_values'],
808
+ 'fast_token_idx': self.fast_token_idx,
809
+ }
810
+
811
+ generate_output = self.generate(
812
+ **mm_inputs,
813
+ generation_config=self.gen_config,
814
+ streamer=None,
815
+ bos_token_id=self.tokenizer.bos_token_id,
816
+ stopping_criteria=self.stop_criteria,
817
+ output_hidden_states=True,
818
+ return_dict_in_generate=True
819
+ )
820
+ predict = self.tokenizer.decode(
821
+ generate_output.sequences[0], skip_special_tokens=False).strip()
822
+
823
+ if image is None and video is None and '<image>' not in past_text:
824
+ return {'prediction': predict, 'prediction_masks': ret_masks, }
825
+
826
+ # if have seg result, find the seg hidden states
827
+ hidden_states = generate_output.hidden_states
828
+ last_hidden_states = [item[-1][0] for item in hidden_states]
829
+ last_hidden_states = torch.cat(last_hidden_states, dim=0)
830
+ seg_hidden_states = get_seg_hidden_states(
831
+ last_hidden_states, generate_output.sequences[0][:-1],
832
+ seg_id=self.seg_token_idx
833
+ )
834
+ all_seg_hidden_states = self.text_hidden_fcs(seg_hidden_states)
835
+
836
+ for seg_hidden_states in all_seg_hidden_states:
837
+ seg_hidden_states = seg_hidden_states.unsqueeze(0)
838
+ g_pixel_values = input_dict['g_pixel_values']
839
+ sam_states = self.grounding_encoder.get_sam2_embeddings(g_pixel_values)
840
+ pred_masks = self.grounding_encoder.language_embd_inference(sam_states, [seg_hidden_states] * num_frames)
841
+ w, h = ori_image_size
842
+ masks = F.interpolate(pred_masks, size=(h, w), mode='bilinear', align_corners=False)
843
+ masks = masks[:, 0]
844
+ masks = masks.sigmoid() > 0.5
845
+ masks = masks.cpu().numpy()
846
+ ret_masks.append(masks)
847
+
848
+ return {'prediction': predict, 'prediction_masks': ret_masks,}
849
+
850
+ def get_seg_hidden_states(hidden_states, output_ids, seg_id):
851
+ seg_mask = output_ids == seg_id
852
+ n_out = len(seg_mask)
853
+ if n_out == 0:
854
+ return hidden_states[0:0]
855
+ return hidden_states[-n_out:][seg_mask]
856
+
857
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height,
858
+ image_size):
859
+ best_ratio_diff = float('inf')
860
+ best_ratio = (1, 1)
861
+ area = width * height
862
+ for ratio in target_ratios:
863
+ target_aspect_ratio = ratio[0] / ratio[1]
864
+ ratio_diff = abs(aspect_ratio - target_aspect_ratio)
865
+ if ratio_diff < best_ratio_diff:
866
+ best_ratio_diff = ratio_diff
867
+ best_ratio = ratio
868
+ elif ratio_diff == best_ratio_diff:
869
+ if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
870
+ best_ratio = ratio
871
+ return best_ratio
872
+
873
+ def dynamic_preprocess(image,
874
+ min_num=1,
875
+ max_num=6,
876
+ image_size=448,
877
+ use_thumbnail=False):
878
+ orig_width, orig_height = image.size
879
+ aspect_ratio = orig_width / orig_height
880
+
881
+ # calculate the existing image aspect ratio
882
+ target_ratios = {(i, j)
883
+ for n in range(min_num, max_num + 1)
884
+ for i in range(1, n + 1) for j in range(1, n + 1)
885
+ if i * j <= max_num and i * j >= min_num}
886
+ target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
887
+
888
+ # find the closest aspect ratio to the target
889
+ target_aspect_ratio = find_closest_aspect_ratio(aspect_ratio,
890
+ target_ratios, orig_width,
891
+ orig_height, image_size)
892
+
893
+ # calculate the target width and height
894
+ target_width = image_size * target_aspect_ratio[0]
895
+ target_height = image_size * target_aspect_ratio[1]
896
+ blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
897
+
898
+ # resize the image
899
+ resized_img = image.resize((target_width, target_height))
900
+ processed_images = []
901
+ for i in range(blocks):
902
+ box = ((i % (target_width // image_size)) * image_size,
903
+ (i // (target_width // image_size)) * image_size,
904
+ ((i % (target_width // image_size)) + 1) * image_size,
905
+ ((i // (target_width // image_size)) + 1) * image_size)
906
+ # split the image
907
+ split_img = resized_img.crop(box)
908
+ processed_images.append(split_img)
909
+ assert len(processed_images) == blocks
910
+ if use_thumbnail and len(processed_images) != 1:
911
+ thumbnail_img = image.resize((image_size, image_size))
912
+ processed_images.append(thumbnail_img)
913
+ return processed_images
914
+
915
+
916
+ from transformers.cache_utils import Cache, DynamicCache
917
+
918
+ def prepare_inputs_for_generation_phi3(
919
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
920
+ ):
921
+ if past_key_values is not None:
922
+ if isinstance(past_key_values, Cache):
923
+ cache_length = past_key_values.get_seq_length()
924
+ past_length = past_key_values.seen_tokens
925
+ max_cache_length = past_key_values.get_max_length()
926
+ else:
927
+ cache_length = past_length = past_key_values[0][0].shape[2]
928
+ max_cache_length = None
929
+
930
+ # Keep only the unprocessed tokens:
931
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
932
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
933
+ # input)
934
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
935
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length):]
936
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
937
+ # input_ids based on the past_length.
938
+ elif past_length < input_ids.shape[1]:
939
+ input_ids = input_ids[:, past_length:]
940
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
941
+
942
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
943
+ if (
944
+ max_cache_length is not None
945
+ and attention_mask is not None
946
+ and cache_length + input_ids.shape[1] > max_cache_length
947
+ ):
948
+ attention_mask = attention_mask[:, -max_cache_length:]
949
+
950
+ position_ids = kwargs.get('position_ids', None)
951
+ if attention_mask is not None and position_ids is None:
952
+ # create position_ids on the fly for batch generation
953
+ position_ids = attention_mask.long().cumsum(-1) - 1
954
+ position_ids.masked_fill_(attention_mask == 0, 1)
955
+ if past_key_values:
956
+ position_ids = position_ids[:, -input_ids.shape[1]:]
957
+
958
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
959
+ if inputs_embeds is not None and (past_key_values is None or len(past_key_values)==0):
960
+ model_inputs = {'inputs_embeds': inputs_embeds}
961
+ else:
962
+ model_inputs = {'input_ids': input_ids}
963
+
964
+ model_inputs.update(
965
+ {
966
+ 'position_ids': position_ids,
967
+ 'past_key_values': past_key_values,
968
+ 'use_cache': kwargs.get('use_cache'),
969
+ 'attention_mask': attention_mask,
970
+ }
971
+ )
972
+ return model_inputs
973
+
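A hedged usage sketch for the predict_forward() path defined above, covering the single-image case. `model` and `tokenizer` are assumed to be the objects from the loading sketch at the top of this commit, and "demo.jpg" plus the prompt text are placeholders.

from PIL import Image

image = Image.open("demo.jpg").convert("RGB")
result = model.predict_forward(
    image=image,
    text="<image>Please segment the person on the left.",
    tokenizer=tokenizer,  # needed once; triggers preparing_for_generation()
)
print(result["prediction"])           # text answer, may contain [SEG] tokens
masks = result["prediction_masks"]    # one boolean numpy mask array per [SEG] token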
sam2.py ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
tokenization_internlm2_fast.py ADDED
@@ -0,0 +1,211 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/tokenization_llama_fast.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Tokenization Fast class for InternLM."""
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, Optional, Tuple
21
+
22
+ from tokenizers import Tokenizer, decoders, normalizers, processors
23
+ from tokenizers.models import BPE
24
+ from transformers.convert_slow_tokenizer import (SLOW_TO_FAST_CONVERTERS,
25
+ SentencePieceExtractor,
26
+ SpmConverter)
27
+ from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
28
+ from transformers.utils import logging
29
+
30
+ from .tokenization_internlm2 import InternLM2Tokenizer
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}
35
+
36
+
37
+ # Modified from transformers.convert_slow_tokenizer.LlamaConverter
38
+ class InternLM2Converter(SpmConverter):
39
+ handle_byte_fallback = True
40
+
41
+ def vocab(self, proto):
42
+ vocab = [
43
+ ('<unk>', 0.0),
44
+ ('<s>', 0.0),
45
+ ('</s>', 0.0),
46
+ ]
47
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
48
+ return vocab
49
+
50
+ def unk_id(self, proto):
51
+ unk_id = 0
52
+ return unk_id
53
+
54
+ def decoder(self, replacement, add_prefix_space):
55
+ return decoders.Sequence(
56
+ [
57
+ decoders.Replace('▁', ' '),
58
+ decoders.ByteFallback(),
59
+ decoders.Fuse(),
60
+ decoders.Strip(content=' ', left=1),
61
+ ]
62
+ )
63
+
64
+ def tokenizer(self, proto):
65
+ model_type = proto.trainer_spec.model_type
66
+ vocab_scores = self.vocab(proto)
67
+ # special tokens
68
+ added_tokens = self.original_tokenizer.added_tokens_decoder
69
+ for i in range(len(vocab_scores)):
70
+ piece, score = vocab_scores[i]
71
+ if i in added_tokens:
72
+ vocab_scores[i] = (added_tokens[i].content, score)
73
+ if model_type == 1:
74
+ raise RuntimeError('InternLM2 is supposed to be a BPE model!')
75
+
76
+ elif model_type == 2:
77
+ _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores)
78
+ bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)}
79
+ tokenizer = Tokenizer(
80
+ BPE(bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=True)
81
+ )
82
+ tokenizer.add_special_tokens(
83
+ [ added_token for index, added_token in added_tokens.items()]
84
+ )
85
+ else:
86
+ raise Exception(
87
+ "You're trying to run a `Unigram` model but your file was trained with a different algorithm"
88
+ )
89
+
90
+ return tokenizer
91
+
92
+ def normalizer(self, proto):
93
+ normalizers_list = []
94
+ if proto.normalizer_spec.add_dummy_prefix:
95
+ normalizers_list.append(normalizers.Prepend(prepend='▁'))
96
+ normalizers_list.append(normalizers.Replace(pattern=' ', content='▁'))
97
+ return normalizers.Sequence(normalizers_list)
98
+
99
+ def pre_tokenizer(self, replacement, add_prefix_space):
100
+ return None
101
+
102
+
103
+ SLOW_TO_FAST_CONVERTERS['InternLM2Tokenizer'] = InternLM2Converter
104
+
105
+
106
+ # Modified from transformers.model.llama.tokenization_llama_fast.LlamaTokenizerFast -> InternLM2TokenizerFast
107
+ class InternLM2TokenizerFast(PreTrainedTokenizerFast):
108
+ vocab_files_names = VOCAB_FILES_NAMES
109
+ slow_tokenizer_class = InternLM2Tokenizer
110
+ padding_side = 'left'
111
+ model_input_names = ['input_ids', 'attention_mask']
112
+ _auto_class = 'AutoTokenizer'
113
+
114
+ def __init__(
115
+ self,
116
+ vocab_file,
117
+ unk_token='<unk>',
118
+ bos_token='<s>',
119
+ eos_token='</s>',
120
+ pad_token='</s>',
121
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
122
+ add_bos_token=True,
123
+ add_eos_token=False,
124
+ decode_with_prefix_space=False,
125
+ clean_up_tokenization_spaces=False,
126
+ **kwargs,
127
+ ):
128
+ super().__init__(
129
+ vocab_file=vocab_file,
130
+ unk_token=unk_token,
131
+ bos_token=bos_token,
132
+ eos_token=eos_token,
133
+ pad_token=pad_token,
134
+ sp_model_kwargs=sp_model_kwargs,
135
+ add_bos_token=add_bos_token,
136
+ add_eos_token=add_eos_token,
137
+ decode_with_prefix_space=decode_with_prefix_space,
138
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
139
+ **kwargs,
140
+ )
141
+ self._add_bos_token = add_bos_token
142
+ self._add_eos_token = add_eos_token
143
+ self.update_post_processor()
144
+ self.vocab_file = vocab_file
145
+
146
+ @property
147
+ def can_save_slow_tokenizer(self) -> bool:
148
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
149
+
150
+ def update_post_processor(self):
151
+ """
152
+ Updates the underlying post processor with the current `bos_token` and `eos_token`.
153
+ """
154
+ bos = self.bos_token
155
+ bos_token_id = self.bos_token_id
156
+ if bos is None and self.add_bos_token:
157
+ raise ValueError('add_bos_token = True but bos_token = None')
158
+
159
+ eos = self.eos_token
160
+ eos_token_id = self.eos_token_id
161
+ if eos is None and self.add_eos_token:
162
+ raise ValueError('add_eos_token = True but eos_token = None')
163
+
164
+ single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
165
+ pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
166
+
167
+ special_tokens = []
168
+ if self.add_bos_token:
169
+ special_tokens.append((bos, bos_token_id))
170
+ if self.add_eos_token:
171
+ special_tokens.append((eos, eos_token_id))
172
+ self._tokenizer.post_processor = processors.TemplateProcessing(
173
+ single=single, pair=pair, special_tokens=special_tokens
174
+ )
175
+
176
+ @property
177
+ def add_eos_token(self):
178
+ return self._add_eos_token
179
+
180
+ @property
181
+ def add_bos_token(self):
182
+ return self._add_bos_token
183
+
184
+ @add_eos_token.setter
185
+ def add_eos_token(self, value):
186
+ self._add_eos_token = value
187
+ self.update_post_processor()
188
+
189
+ @add_bos_token.setter
190
+ def add_bos_token(self, value):
191
+ self._add_bos_token = value
192
+ self.update_post_processor()
193
+
194
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
195
+ if not self.can_save_slow_tokenizer:
196
+ raise ValueError(
197
+ 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
198
+ 'tokenizer.'
199
+ )
200
+
201
+ if not os.path.isdir(save_directory):
202
+ logger.error(f'Vocabulary path ({save_directory}) should be a directory')
203
+ return
204
+ out_vocab_file = os.path.join(
205
+ save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
206
+ )
207
+
208
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
209
+ copyfile(self.vocab_file, out_vocab_file)
210
+
211
+ return (out_vocab_file,)
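A hedged sketch of how the add_bos_token/add_eos_token setters above rebuild the post-processor. It assumes the class can be imported directly from the local file and that "tokenizer.model" is a placeholder path to a real SentencePiece model.

from tokenization_internlm2_fast import InternLM2TokenizerFast

tok = InternLM2TokenizerFast(vocab_file="tokenizer.model")  # placeholder path
print(tok.encode("hello"))  # starts with the <s> id because add_bos_token=True

tok.add_eos_token = True    # setter calls update_post_processor()
print(tok.encode("hello"))  # now also ends with the </s> id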
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
vocab.json ADDED
The diff for this file is too large to render. See raw diff