assafvayner HF Staff committed on
Commit ac7fab4 · verified · 1 Parent(s): 9d85764

Upload folder
.claude/settings.local.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "permissions": {
3
+ "allow": [
4
+ "Bash(python3 -m json.tool)",
5
+ "Read(//Users/assafvayner/.claude/**)",
6
+ "Bash(python3 -c \"import json,sys; data=json.load\\(sys.stdin\\); [print\\(json.dumps\\(p, indent=2\\)\\) for p in data if 'huggingface-infra' in json.dumps\\(p\\)]\")"
7
+ ]
8
+ }
9
+ }
.gitattributes CHANGED
@@ -1,6 +1,5 @@
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.avro filter=lfs diff=lfs merge=lfs -text
4
  *.bin filter=lfs diff=lfs merge=lfs -text
5
  *.bz2 filter=lfs diff=lfs merge=lfs -text
6
  *.ckpt filter=lfs diff=lfs merge=lfs -text
@@ -9,8 +8,6 @@
9
  *.h5 filter=lfs diff=lfs merge=lfs -text
10
  *.joblib filter=lfs diff=lfs merge=lfs -text
11
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
12
- *.lz4 filter=lfs diff=lfs merge=lfs -text
13
- *.mds filter=lfs diff=lfs merge=lfs -text
14
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
15
  *.model filter=lfs diff=lfs merge=lfs -text
16
  *.msgpack filter=lfs diff=lfs merge=lfs -text
@@ -36,34 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
36
  *.zip filter=lfs diff=lfs merge=lfs -text
37
  *.zst filter=lfs diff=lfs merge=lfs -text
38
  *tfevents* filter=lfs diff=lfs merge=lfs -text
39
- # Audio files - uncompressed
40
- *.pcm filter=lfs diff=lfs merge=lfs -text
41
- *.sam filter=lfs diff=lfs merge=lfs -text
42
- *.raw filter=lfs diff=lfs merge=lfs -text
43
- # Audio files - compressed
44
- *.aac filter=lfs diff=lfs merge=lfs -text
45
- *.flac filter=lfs diff=lfs merge=lfs -text
46
- *.mp3 filter=lfs diff=lfs merge=lfs -text
47
- *.ogg filter=lfs diff=lfs merge=lfs -text
48
- *.wav filter=lfs diff=lfs merge=lfs -text
49
- # Image files - uncompressed
50
- *.bmp filter=lfs diff=lfs merge=lfs -text
51
- *.gif filter=lfs diff=lfs merge=lfs -text
52
- *.png filter=lfs diff=lfs merge=lfs -text
53
- *.tiff filter=lfs diff=lfs merge=lfs -text
54
- # Image files - compressed
55
- *.jpg filter=lfs diff=lfs merge=lfs -text
56
- *.jpeg filter=lfs diff=lfs merge=lfs -text
57
- *.webp filter=lfs diff=lfs merge=lfs -text
58
- # Video files - compressed
59
- *.mp4 filter=lfs diff=lfs merge=lfs -text
60
- *.webm filter=lfs diff=lfs merge=lfs -text
61
- blobs/96805d61fbb9523fd27a09ab40451d04da09e9ba4b102341eac0184d8f82a0b1 filter=lfs diff=lfs merge=lfs -text
62
- blobs/e2a59915dd6a1c51ccb11be3addf4585fcf0840ac4f63f8e9fb629db58f8db6e filter=lfs diff=lfs merge=lfs -text
63
- blobs/39ab057316af49c3d81c67b80a98d72727ce686ac68ae72ce71a05fc5297b856 filter=lfs diff=lfs merge=lfs -text
64
- blobs/e1219ef85875905368b39e3fe383d72fc6539ade5abf81f7cedf94a19275a345 filter=lfs diff=lfs merge=lfs -text
65
- blobs/9ce20192fbe0d521d100521f1e0836c415debacb615b89f7658178420822e710 filter=lfs diff=lfs merge=lfs -text
66
- blobs/1fa4abb1ce00765aa78c4714c71c65c57e46706564aa8f908e78d7c6fa51d07e filter=lfs diff=lfs merge=lfs -text
67
- blobs/f78e77184ab4f3d53e28b85ffda8b328185cc40fcfaab59d17c3df0f2524621e.incomplete filter=lfs diff=lfs merge=lfs -text
68
- blobs/a5ad94eb2fc98b41bd89b8cf33f446e86c472ccd15382c0f17a53028af90258d filter=lfs diff=lfs merge=lfs -text
69
- blobs/f78e77184ab4f3d53e28b85ffda8b328185cc40fcfaab59d17c3df0f2524621e filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,62 @@
1
+ ---
2
+ library_name: wilddet3d
3
+ license: other
4
+ license_name: sam-license
5
+ license_link: https://github.com/facebookresearch/sam3/blob/main/LICENSE
6
+ pipeline_tag: object-detection
7
+ tags:
8
+ - 3d-detection
9
+ - monocular
10
+ - open-vocabulary
11
+ - 3d-object-detection
12
+ - object-detection
13
+ - depth-estimation
14
+ - computer-vision
15
+ - grounding
16
+ - segmentation
17
+ - point-cloud
18
+ - 3d-bounding-box
19
+ - zero-shot
20
+ - promptable
21
+ - pytorch
22
+ datasets:
23
+ - allenai/WildDet3D-Data
24
+ language:
25
+ - en
26
+ ---
27
+
28
+ # WildDet3D: Scaling Promptable 3D Detection in the Wild
29
+
30
+ **WildDet3D** is a promptable monocular 3D object detection model that detects and localizes objects in 3D from a single RGB image. It supports **text**, **box**, and **point** prompts for open-vocabulary 3D detection across diverse in-the-wild scenes.
31
+
32
+ **Authors:** Weikai Huang, Jieyu Zhang, Sijun Li, Taoyang Jia, Jiafei Duan, Yunqian Cheng, Jaemin Cho, Matthew Wallingford, Rustin Soraki, Chris Dongjoo Kim, Shuo Liu, Donovan Clay, Taira Anderson, Winson Han, Ali Farhadi, Bharath Hariharan, Zhongzheng Ren, Ranjay Krishna
33
+
34
+ **Affiliations:** Allen Institute for AI (Ai2), University of Washington, Cornell University, UNC-Chapel Hill
35
+
36
+ ## Model Details
37
+
38
+ | Property | Value |
39
+ |---|---|
40
+ | Backbone | SAM3 ViT (1024-dim, 32 blocks, patch 14) |
41
+ | Depth Backend | LingBot-Depth (DINOv2 ViT-L/14) |
42
+ | Parameters | ~1.2B |
43
+ | Input | RGB image + camera intrinsics (optional) + sparse/dense depth (optional) |
44
+ | Output | 2D boxes, 3D boxes, depth maps, predicted intrinsics |
45
+ | Prompt Types | Text, Box (visual/geometric), Point |
46
+ | License | SAM License |
47
+
48
+ When camera intrinsics are not available (e.g., in-the-wild images), the model can predict intrinsics internally. When sparse or dense depth (e.g., from LiDAR) is provided, it is fused for improved 3D localization.
49
+
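+
+ For reference, camera intrinsics are typically supplied as a 3x3 pinhole matrix K built from the focal lengths and the principal point. A minimal sketch of such an input (values purely illustrative, not tied to any particular dataset):
+
+ ```python
+ import numpy as np
+
+ # Illustrative pinhole intrinsics for a 1280x720 image.
+ fx = fy = 1000.0        # focal lengths in pixels
+ cx, cy = 640.0, 360.0   # principal point (image center)
+ K = np.array([
+     [fx, 0.0, cx],
+     [0.0, fy, cy],
+     [0.0, 0.0, 1.0],
+ ])
+ print(K)
+ ```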
50
+ ## Citation
51
+
52
+ ```bibtex
53
+ @article{wilddet3d,
54
+ title={WildDet3D: Scaling Promptable 3D Detection in the Wild},
55
+ author={Huang, Weikai and Zhang, Jieyu and Li, Sijun and Jia, Taoyang and Duan, Jiafei and Cheng, Yunqian and Cho, Jaemin and Wallingford, Matthew and Soraki, Rustin and Kim, Chris Dongjoo and Liu, Shuo and Clay, Donovan and Anderson, Taira and Han, Winson and Farhadi, Ali and Hariharan, Bharath and Ren, Zhongzheng and Krishna, Ranjay},
56
+ year={2026},
57
+ }
58
+ ```
59
+
60
+ ## License
61
+
62
+ This model uses SAM 3 and LingBot-Depth weights and is licensed under the [SAM License](https://github.com/facebookresearch/sam3/blob/main/LICENSE). It is intended for research and educational use in accordance with Ai2's [Responsible Use Guidelines](https://allenai.org/responsible-use).
added_tokens.json ADDED
The diff for this file is too large to render. See raw diff
 
chat_template.jinja ADDED
@@ -0,0 +1 @@
1
+ {% set DEMO_STYLES = ['point_count','pointing','cosyn_point','user_qa','long_caption','short_caption','video_long_caption','video_short_caption','video_point_track_per_frame','video_point_track_start_end','video_point_track_all_frames','video_single_point_track_start_end','video_transcript','video_clip_caption_start_end','video_clip_caption_start_end_in_seconds','video_clip_transcript_start_end','video_clip_transcript_start_end_in_seconds','video_frame_caption_timestamp','video_frame_caption_timestamp_in_seconds','correction_qa','text_sft','video_point','video_point_count','video_count','video_count_point','multi_image_pointing','multi_image_counting','multi_image_point_then_count','multi_image_count_then_point','demo','a_okvqa_mc','ai2_diagram_no_letter','ai2_diagram','science_qa','multi_image_mc','multi_image_mc_exp','mantis_instruct_mc','video_multiple_choice','video_multiple_choice_count_without_pointing','video_multiple_choice_multiple_correct','video_multiple_choice_w_subtitle'] %}{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% set has_subtitle = messages and messages[0]['role'].lower() == 'subtitle' %}{% for message in messages %}{% if message['content'] is not string %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% elif content['type'] == 'video' or 'video' in content or 'video_url' in content %}{% set video_count.value = video_count.value + 1 %}{% endif %}{% endfor %}{% endif %}{% endfor %}{% if image_count.value == 1 %}{{ '<|image|>' }}{% elif image_count.value > 1 %}{% for i in range(image_count.value) %}{{ 'Image ' ~ (i + 1) ~ '<|image|>' }}{% endfor %}{% endif %}{% for _ in range(video_count.value) %}{{ '<|video|>' }}{% endfor %}{% if has_subtitle %}{{ messages[0]['content'] }}{% endif %}{% for message in messages %}{% set role = message['role'].lower() %}{% if role == 'subtitle' %}{% continue %}{% endif %}{% set conv_index = loop.index - (1 if has_subtitle else 0) %}{%- if (conv_index % 2 == 1 and role != 'user') or (conv_index % 2 == 0 and role != 'assistant') -%}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{%- endif -%}{% if message['content'] is string %}{% set text_content = message['content'] %}{% else %}{% set m = namespace(text='') %}{% for content in message['content'] %}{% if content['type'] == 'text' %}{% if content['style'] is defined and content['style'] not in DEMO_STYLES %}{% set seg = content['style'] ~ ': ' ~ content['text'] %}{% else %}{% set seg = content['text'] %}{% endif %}{% set m.text = m.text ~ ('' if not m.text else ' ') ~ seg %}{% endif %}{% endfor %}{% set text_content = m.text %}{% endif %}{% if role == 'user' %}{% if not (has_subtitle and loop.index == 2) and not (not has_subtitle and loop.first) %}{{ '<|im_end|>\n' }}{% endif %}{{ '<|im_start|>user\n' }}{{ text_content }}{{ '<|im_end|>\n' }}{% else %} {# assistant #}{{ '<|im_start|>assistant\n' }}{{ text_content }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}
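The template above renders image/video placeholders up front, requires strictly alternating user/assistant turns, and wraps turns in `<|im_start|>`/`<|im_end|>` markers. A minimal sketch of exercising it (assumes `tokenizer` is a transformers tokenizer already loaded from this repo with `trust_remote_code=True`; the repo id is intentionally omitted):

```python
# Sketch only: `tokenizer` is assumed to carry the Jinja chat template stored in this file.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},  # counted by the template and emitted as <|image|>
            {"type": "text", "text": "Point to the red mug."},
        ],
    },
]

prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)
# Per the template logic, this yields:
# <|image|><|im_start|>user\nPoint to the red mug.<|im_end|>\n<|im_start|>assistant\n
```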
config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "model_type": "wilddet3d",
3
+ "backbone": "sam3_vit",
4
+ "depth_backend": "lingbot_depth_dinov2_vitl14",
5
+ "parameters": "1.2B"
6
+ }
configuration_molmo2.py ADDED
@@ -0,0 +1,391 @@
1
+ """
2
+ Molmo2 configuration
3
+ """
4
+
5
+ from typing import Optional, Any
6
+
7
+ from transformers import PretrainedConfig
8
+ from transformers.modeling_rope_utils import rope_config_validation
9
+ from transformers.utils import logging
10
+
11
+ logger = logging.get_logger(__name__)
12
+
13
+
14
+ class Molmo2VitConfig(PretrainedConfig):
15
+ r"""
16
+ This is the configuration class to store the configuration of a [`Molmo2VisionTransformer`].
17
+ It is used to instantiate a `Molmo2VisionTransformer` according to the specified arguments,
18
+ defining the model architecture.
19
+
20
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
21
+ documentation from [`PretrainedConfig`] for more information.
22
+
23
+ Example:
24
+ ```python
25
+ >>> from transformers import Molmo2VitConfig, Molmo2VisionTransformer
26
+
27
+ >>> # Initializing a Molmo2VitConfig
28
+ >>> configuration = Molmo2VitConfig()
29
+
30
+ >>> # Initializing a Molmo2VisionTransformer (with random weights)
31
+ >>> model = Molmo2VisionTransformer(configuration)
32
+
33
+ >>> # Accessing the model configuration
34
+ >>> configuration = model.config
35
+ ```"""
36
+
37
+ model_type = "molmo2"
38
+ base_config_key = "vit_config"
39
+
40
+ def __init__(
41
+ self,
42
+ hidden_size: int = 1152,
43
+ intermediate_size: int = 4304,
44
+ num_hidden_layers: int = 27,
45
+ num_attention_heads: int = 16,
46
+ num_key_value_heads: int = 16,
47
+ head_dim: int = 72,
48
+ hidden_act: str = "gelu_pytorch_tanh",
49
+ layer_norm_eps: float = 1e-6,
50
+ image_default_input_size: tuple[int, int] = (378, 378),
51
+ image_patch_size: int = 14,
52
+ image_num_pos: int = 577,
53
+ attention_dropout: float = 0.0,
54
+ residual_dropout: float = 0.0,
55
+ initializer_range: float = 0.02,
56
+ float32_attention: bool = True,
57
+ attn_implementation: str = "eager",
58
+ **kwargs,
59
+ ):
60
+ self.attn_implementation = attn_implementation
61
+ super().__init__(
62
+ attn_implementation=attn_implementation,
63
+ **kwargs
64
+ )
65
+ self.hidden_size = hidden_size
66
+ self.intermediate_size = intermediate_size
67
+ self.num_hidden_layers = num_hidden_layers
68
+ self.num_attention_heads = num_attention_heads
69
+ self.num_key_value_heads = num_key_value_heads
70
+ self.head_dim = head_dim
71
+ self.hidden_act = hidden_act
72
+ self.layer_norm_eps = layer_norm_eps
73
+ self.image_default_input_size = image_default_input_size
74
+ self.image_patch_size = image_patch_size
75
+ self.image_num_pos = image_num_pos
76
+ self.attention_dropout = attention_dropout
77
+ self.residual_dropout = residual_dropout
78
+ self.initializer_range = initializer_range
79
+ self.float32_attention = float32_attention
80
+
81
+ @property
82
+ def image_num_patch(self):
83
+ h, w = self.image_default_input_size
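+ # e.g., the default 378x378 input with patch size 14 gives a 27x27 patch grid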
84
+ return h // self.image_patch_size, w // self.image_patch_size
85
+
86
+
87
+ class Molmo2AdapterConfig(PretrainedConfig):
88
+ r"""
89
+ This is the configuration class to store the configuration of a Molmo2Adapter. Together with a Molmo2VitConfig,
90
+ it is used to instantiate a Molmo2VisionBackbone according to the specified arguments,
91
+ defining the model architecture.
92
+
93
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
94
+ documentation from [`PretrainedConfig`] for more information.
95
+
96
+ Example:
97
+
98
+ ```python
99
+ >>> from transformers import Molmo2VitConfig, Molmo2AdapterConfig, Molmo2VisionBackbone
100
+
101
+ >>> # Initializing a Molmo2VitConfig and a Molmo2AdapterConfig
102
+ >>> vit_config = Molmo2VitConfig()
103
+ >>> adapter_config = Molmo2AdapterConfig()
104
+
105
+ >>> # Initializing a Molmo2VisionBackbone (with random weights)
106
+ >>> model = Molmo2VisionBackbone(vit_config, adapter_config)
107
+
108
+ >>> # Accessing the model configuration
109
+ >>> vit_configuration = model.vit_config
110
+ >>> adapter_configuration = model.adapter_config
111
+ ```"""
112
+
113
+ model_type = "molmo2"
114
+ base_config_key = "adapter_config"
115
+
116
+ def __init__(
117
+ self,
118
+ vit_layers: tuple = (-3, -9),
119
+ pooling_attention_mask: bool = False,
120
+ hidden_size: int = 1152,
121
+ num_attention_heads: int = 16,
122
+ num_key_value_heads: int = 16,
123
+ head_dim: int = 72,
124
+ float32_attention: bool = True,
125
+ attention_dropout: float = 0.0,
126
+ residual_dropout: float = 0.0,
127
+ hidden_act: str = "silu",
128
+ intermediate_size: int = 18944,
129
+ text_hidden_size: int = 3584,
130
+ image_feature_dropout: float = 0.0,
131
+ initializer_range: float = 0.02,
132
+ attn_implementation: str = "eager",
133
+ **kwargs,
134
+ ):
135
+ self.attn_implementation = attn_implementation
136
+ super().__init__(
137
+ attn_implementation=attn_implementation,
138
+ **kwargs
139
+ )
140
+ self.vit_layers = vit_layers
141
+ self.pooling_attention_mask = pooling_attention_mask
142
+ self.hidden_size = hidden_size
143
+ self.num_attention_heads = num_attention_heads
144
+ self.num_key_value_heads = num_key_value_heads
145
+ self.head_dim = head_dim
146
+ self.float32_attention = float32_attention
147
+ self.attention_dropout = attention_dropout
148
+ self.residual_dropout = residual_dropout
149
+ self.hidden_act = hidden_act
150
+ self.intermediate_size = intermediate_size
151
+ self.text_hidden_size = text_hidden_size
152
+ self.image_feature_dropout = image_feature_dropout
153
+ self.initializer_range = initializer_range
154
+
155
+
156
+ class Molmo2TextConfig(PretrainedConfig):
157
+ r"""
158
+ This is the configuration class to store the configuration of a [`Molmo2TextModel`]. It is used to instantiate a
159
+ `Molmo2TextModel` according to the specified arguments, defining the model architecture.
160
+
161
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
162
+ documentation from [`PretrainedConfig`] for more information.
163
+
164
+ Example:
165
+ ```python
166
+ >>> from transformers import Molmo2TextConfig, Molmo2TextModel
167
+
168
+ >>> # Initializing a Molmo2TextConfig
169
+ >>> configuration = Molmo2TextConfig()
170
+
171
+ >>> # Initializing a Molmo2TextModel (with random weights)
172
+ >>> model = Molmo2TextModel(configuration)
173
+
174
+ >>> # Accessing the model configuration
175
+ >>> configuration = model.config
176
+ ```"""
177
+
178
+ model_type = "molmo2_text"
179
+ base_config_key = "text_config"
180
+ keys_to_ignore_at_inference = ["past_key_values"]
181
+ base_model_tp_plan = {
182
+ "blocks.*.self_attn.att_proj": "colwise",
183
+ "blocks.*.self_attn.attn_out": "rowwise",
184
+ "blocks.*.mlp.ff_proj": "colwise",
185
+ "blocks.*.mlp.ff_out": "rowwise",
186
+ }
187
+ base_model_pp_plan = {
188
+ "wte": (["input_ids"], ["inputs_embeds"]),
189
+ "blocks": (["hidden_states", "attention_mask"], ["hidden_states"]),
190
+ "ln_f": (["hidden_states"], ["hidden_states"]),
191
+ }
192
+
193
+ def __init__(
194
+ self,
195
+ hidden_size: int = 3584,
196
+ num_attention_heads: int = 28,
197
+ num_key_value_heads: Optional[int] = 4,
198
+ head_dim: int = 128,
199
+ vocab_size: int = 152064,
200
+ additional_vocab_size: int = 128,
201
+ qkv_bias: bool = True,
202
+ num_hidden_layers: int = 48,
203
+ intermediate_size: int = 18944,
204
+ hidden_act: str = "silu",
205
+ embedding_dropout: float=0.0,
206
+ attention_dropout: float=0.0,
207
+ residual_dropout: float = 0.0,
208
+ max_position_embeddings: int = 4096,
209
+ rope_theta: float = 1000000.0,
210
+ rope_scaling: Optional[dict[str, Any]] = None,
211
+ rope_scaling_layers: Optional[list[int]] = None,
212
+ use_qk_norm: bool = False,
213
+ qk_norm_type: str = "olmo",
214
+ layer_norm_eps: float = 1e-6,
215
+ norm_after: bool = False,
216
+ initializer_range: float = 0.02,
217
+ use_cache=True,
218
+ tie_word_embeddings=False,
219
+ attn_implementation: str = "eager",
220
+ **kwargs,
221
+ ):
222
+ self.attn_implementation = attn_implementation
223
+ super().__init__(
224
+ tie_word_embeddings=tie_word_embeddings,
225
+ attn_implementation=attn_implementation,
226
+ **kwargs
227
+ )
228
+ self.hidden_size = hidden_size
229
+ self.num_attention_heads = num_attention_heads
230
+ if num_key_value_heads is None:
231
+ num_key_value_heads = num_attention_heads
232
+ self.num_key_value_heads = num_key_value_heads
233
+ self.head_dim = head_dim
234
+ self.vocab_size = vocab_size
235
+ self.additional_vocab_size = additional_vocab_size
236
+ self.qkv_bias = qkv_bias
237
+ self.num_hidden_layers = num_hidden_layers
238
+ self.intermediate_size = intermediate_size
239
+ self.hidden_act = hidden_act
240
+ self.embedding_dropout = embedding_dropout
241
+ self.attention_dropout = attention_dropout
242
+ self.residual_dropout = residual_dropout
243
+ self.max_position_embeddings = max_position_embeddings
244
+ self.rope_theta = rope_theta
245
+ self.rope_scaling = rope_scaling
246
+ self.rope_scaling_layers = rope_scaling_layers
247
+ self.use_qk_norm = use_qk_norm
248
+ self.qk_norm_type = qk_norm_type
249
+ self.layer_norm_eps = layer_norm_eps
250
+ self.norm_after = norm_after
251
+ self.initializer_range = initializer_range
252
+ self.use_cache = use_cache
253
+
254
+ # Validate the correctness of rotary position embeddings parameters
255
+ rope_config_validation(self)
256
+
257
+
258
+ class Molmo2Config(PretrainedConfig):
259
+ r"""
260
+ This is the configuration class to store the configuration of a [`Molmo2ForConditionalGeneration`].
261
+ It is used to instantiate a Molmo2 model according to the specified arguments, defining the model architecture.
262
+
263
+ Example:
264
+
265
+ ```python
266
+ >>> from transformers import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2TextConfig
267
+
268
+ >>> # Initializing a Molmo2VitConfig
269
+ >>> vit_config = Molmo2VitConfig()
270
+
271
+ >>> # Initializing a Molmo2AdapterConfig
272
+ >>> adapter_config = Molmo2AdapterConfig()
273
+
274
+ >>> # Initializing a Molmo2TextConfig
275
+ >>> text_config = Molmo2TextConfig()
276
+
277
+ >>> # Initializing a Molmo2Config
278
+ >>> configuration = Molmo2Config(
279
+ >>> vit_config=vit_config,
280
+ >>> adapter_config=adapter_config,
281
+ >>> text_config=text_config,
282
+ >>> image_start_token_id=151936,
283
+ >>> image_end_token_id=151937,
284
+ >>> image_patch_id=151938,
285
+ >>> image_col_id=151939,
286
+ >>> low_res_image_start_token_id=151940,
287
+ >>> image_low_res_id=151942,
288
+ >>> frame_start_token_id=151943,
289
+ >>> frame_end_token_id=151944,
290
+ >>> )
291
+
292
+ >>> # Initializing a model
293
+ >>> model = Molmo2ForConditionalGeneration(configuration)
294
+
295
+ >>> # Accessing the model configuration
296
+ >>> configuration = model.config
297
+ ```"""
298
+
299
+ model_type = "molmo2"
300
+ sub_configs = {
301
+ "text_config": Molmo2TextConfig,
302
+ "vit_config": Molmo2VitConfig,
303
+ "adapter_config": Molmo2AdapterConfig,
304
+ }
305
+
306
+ def __init__(
307
+ self,
308
+ vit_config: Molmo2VitConfig = None,
309
+ adapter_config: Molmo2AdapterConfig = None,
310
+ text_config: Molmo2TextConfig = None,
311
+ image_start_token_id: int = None,
312
+ low_res_image_start_token_id: int = None,
313
+ image_end_token_id: int = None,
314
+ image_low_res_id: int = None,
315
+ image_patch_id: int = None,
316
+ image_col_id: int = None,
317
+ frame_start_token_id: int = None,
318
+ frame_end_token_id: int = None,
319
+ use_frame_special_tokens: bool = True,
320
+ initializer_range: float = 0.02,
321
+ **kwargs,
322
+ ):
323
+ super().__init__(**kwargs)
324
+ if vit_config is None:
325
+ self.vit_config = Molmo2VitConfig()
326
+ elif isinstance(vit_config, dict):
327
+ self.vit_config = Molmo2VitConfig(**vit_config)
328
+ else:
329
+ self.vit_config = vit_config
330
+ if adapter_config is None:
331
+ self.adapter_config = Molmo2AdapterConfig()
332
+ elif isinstance(adapter_config, dict):
333
+ self.adapter_config = Molmo2AdapterConfig(**adapter_config)
334
+ else:
335
+ self.adapter_config = adapter_config
336
+ if text_config is None:
337
+ self.text_config = Molmo2TextConfig()
338
+ elif isinstance(text_config, dict):
339
+ self.text_config = Molmo2TextConfig(**text_config)
340
+ else:
341
+ self.text_config = text_config
342
+ self.image_start_token_id = image_start_token_id
343
+ self.low_res_image_start_token_id = low_res_image_start_token_id
344
+ self.image_end_token_id = image_end_token_id
345
+ self.image_low_res_id = image_low_res_id
346
+ self.image_high_res_id = image_patch_id
347
+ self.image_patch_id = image_patch_id
348
+ self.image_col_id = image_col_id
349
+ self.frame_start_token_id = frame_start_token_id
350
+ self.frame_end_token_id = frame_end_token_id
351
+ self.use_frame_special_tokens = use_frame_special_tokens
352
+ self.initializer_range = initializer_range
353
+
354
+ @property
355
+ def image_num_patch(self):
356
+ assert self.vit_config is not None
357
+ return self.vit_config.image_num_patch
358
+
359
+ @property
360
+ def num_attention_heads(self):
361
+ return self.text_config.num_attention_heads
362
+
363
+ @property
364
+ def num_key_value_heads(self):
365
+ return self.text_config.num_key_value_heads
366
+
367
+ @property
368
+ def head_dim(self):
369
+ return self.text_config.head_dim
370
+
371
+ @property
372
+ def num_hidden_layers(self):
373
+ return self.text_config.num_hidden_layers
374
+
375
+ @property
376
+ def hidden_size(self):
377
+ return self.text_config.hidden_size
378
+
379
+ @property
380
+ def vocab_size(self):
381
+ return self.text_config.vocab_size
382
+
383
+ @property
384
+ def max_position_embeddings(self):
385
+ return self.text_config.max_position_embeddings
386
+
387
+
388
+ Molmo2VitConfig.register_for_auto_class()
389
+ Molmo2AdapterConfig.register_for_auto_class()
390
+ Molmo2TextConfig.register_for_auto_class()
391
+ Molmo2Config.register_for_auto_class()
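A small sketch of composing these configs (imports assume the file above is importable as `configuration_molmo2`; the token id is illustrative, mirroring the docstring example):

```python
from configuration_molmo2 import Molmo2Config, Molmo2TextConfig

# Compose a config from defaults, overriding one text-model field.
config = Molmo2Config(
    text_config=Molmo2TextConfig(max_position_embeddings=8192),
    image_patch_id=151938,  # illustrative special-token id
)

# Top-level properties proxy into the sub-configs.
print(config.hidden_size)        # 3584 (text_config default)
print(config.num_hidden_layers)  # 48 (text_config default)
print(config.image_num_patch)    # (27, 27): 378x378 default input, patch size 14
config.save_pretrained("./molmo2-config")  # writes config.json for reuse
```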
configuration_molmo_point.py ADDED
@@ -0,0 +1,255 @@
1
+ """
2
+ Molmo2 configuration
3
+ """
4
+
5
+ from typing import Optional
6
+
7
+ from transformers import PretrainedConfig, LogitsProcessor
8
+ from transformers.utils import logging
9
+
10
+ from .configuration_molmo2 import Molmo2TextConfig, Molmo2VitConfig, \
11
+ Molmo2AdapterConfig
12
+
13
+ logger = logging.get_logger(__name__)
14
+
15
+
16
+ class MolmoPointAdapterConfig(PretrainedConfig):
17
+ r"""
18
+ This is the configuration class to store the configuration of the MolmoPoint adapter. Together with a Molmo2VitConfig,
19
+ it is used to instantiate a Molmo2VisionBackbone according to the specified arguments,
20
+ defining the model architecture.
21
+
22
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
23
+ documentation from [`PretrainedConfig`] for more information.
24
+
25
+ Example:
26
+
27
+ ```python
28
+ >>> from transformers import Molmo2VitConfig, MolmoPointAdapterConfig, Molmo2VisionBackbone
29
+
30
+ >>> # Initializing a Molmo2VitConfig and a MolmoPointAdapterConfig
31
+ >>> vit_config = Molmo2VitConfig()
32
+ >>> adapter_config = MolmoPointAdapterConfig()
33
+
34
+ >>> # Initializing a Molmo2VisionBackbone (with random weights)
35
+ >>> model = Molmo2VisionBackbone(vit_config, adapter_config)
36
+
37
+ >>> # Accessing the model configuration
38
+ >>> vit_configuration = model.vit_config
39
+ >>> adapter_configuration = model.adapter_config
40
+ ```"""
41
+
42
+ model_type = "molmo_point"
43
+ base_config_key = "adapter_config"
44
+
45
+ def __init__(
46
+ self,
47
+ vit_layers: tuple = (-3, -9),
48
+ pooling_attention_mask: bool = False,
49
+ hidden_size: int = 1152,
50
+ num_attention_heads: int = 16,
51
+ num_key_value_heads: int = 16,
52
+ head_dim: int = 72,
53
+ float32_attention: bool = True,
54
+ attention_dropout: float = 0.0,
55
+ residual_dropout: float = 0.0,
56
+ hidden_act: str = "silu",
57
+ intermediate_size: int = 18944,
58
+ text_hidden_size: int = 3584,
59
+ image_feature_dropout: float = 0.0,
60
+ initializer_range: float = 0.02,
61
+ attn_implementation: str = "eager",
62
+ positional_embeddings: int = 16,
63
+ attention_pooling_out_layer: bool = False,
64
+ **kwargs,
65
+ ):
66
+ self.attn_implementation = attn_implementation
67
+ super().__init__(
68
+ attn_implementation=attn_implementation,
69
+ **kwargs
70
+ )
71
+ self.vit_layers = vit_layers
72
+ self.pooling_attention_mask = pooling_attention_mask
73
+ self.hidden_size = hidden_size
74
+ self.num_attention_heads = num_attention_heads
75
+ self.num_key_value_heads = num_key_value_heads
76
+ self.head_dim = head_dim
77
+ self.float32_attention = float32_attention
78
+ self.attention_dropout = attention_dropout
79
+ self.residual_dropout = residual_dropout
80
+ self.hidden_act = hidden_act
81
+ self.intermediate_size = intermediate_size
82
+ self.text_hidden_size = text_hidden_size
83
+ self.image_feature_dropout = image_feature_dropout
84
+ self.initializer_range = initializer_range
85
+ self.positional_embeddings = positional_embeddings
86
+ self.attention_pooling_out_layer = attention_pooling_out_layer
87
+
88
+
89
+ class MolmoPointConfig(PretrainedConfig):
90
+ r"""
91
+ This is the configuration class to store the configuration of a [`MolmoPointForConditionalGeneration`].
92
+ It is used to instantiate a MolmoPoint model according to the specified arguments, defining the model architecture.
93
+
94
+ Example:
95
+
96
+ ```python
97
+ >>> from transformers import MolmoPointConfig, Molmo2VitConfig, MolmoPointAdapterConfig, Molmo2TextConfig
98
+
99
+ >>> # Initializing a Molmo2VitConfig
100
+ >>> vit_config = Molmo2VitConfig()
101
+
102
+ >>> # Initializing a Molmo2AdapterConfig
103
+ >>> adapter_config = MolmoPointAdapterConfig()
104
+
105
+ >>> # Initializing a Molmo2TextConfig
106
+ >>> text_config = Molmo2TextConfig()
107
+
108
+ >>> # Initializing a MolmoPointConfig
109
+ >>> configuration = MolmoPointConfig(
110
+ >>> vit_config=vit_config,
111
+ >>> adapter_config=adapter_config,
112
+ >>> text_config=text_config,
113
+ >>> image_start_token_id=151936,
114
+ >>> image_end_token_id=151937,
115
+ >>> image_patch_id=151938,
116
+ >>> image_col_id=151939,
117
+ >>> low_res_image_start_token_id=151940,
118
+ >>> image_low_res_id=151942,
119
+ >>> frame_start_token_id=151943,
120
+ >>> frame_end_token_id=151944,
121
+ >>> )
122
+
123
+ >>> # Initializing a model
124
+ >>> model = MolmoPointForConditionalGeneration(configuration)
125
+
126
+ >>> # Accessing the model configuration
127
+ >>> configuration = model.config
128
+ ```"""
129
+
130
+ model_type = "molmo_point"
131
+ sub_configs = {
132
+ "text_config": Molmo2TextConfig,
133
+ "vit_config": Molmo2VitConfig,
134
+ "adapter_config": MolmoPointAdapterConfig,
135
+ }
136
+
137
+ def __init__(
138
+ self,
139
+ vit_config: Molmo2VitConfig = None,
140
+ adapter_config: MolmoPointAdapterConfig = None,
141
+ text_config: Molmo2TextConfig = None,
142
+ image_start_token_id: int = None,
143
+ low_res_image_start_token_id: int = None,
144
+ image_end_token_id: int = None,
145
+ image_patch_id: int = None,
146
+ image_non_indexable_patch_id: int = None,
147
+ image_col_id: int = None,
148
+ frame_start_token_id: int = None,
149
+ frame_end_token_id: int = None,
150
+ patch_token_id: int = None,
151
+ subpatch_token_id: int = None,
152
+ location_token_id: int = None,
153
+ use_frame_special_tokens: bool = True,
154
+ initializer_range: float = 0.02,
155
+
156
+ # point config
157
+ patch_location: Optional[str]="3x3",
158
+ no_more_points_class: bool=False,
159
+ patch_embed_dim: int=256,
160
+ patch_embedding_kind: str="linear",
161
+ embed_selected_vit_patch: Optional[str]="linear",
162
+ embed_location: bool=False,
163
+ layer_norm_x: bool=True,
164
+ norm_logits: bool=True,
165
+ # FIXME figure out how inference params work
166
+ mask_patches: Optional[str]="always",
167
+ mask_subpatches: str="inference",
168
+ mask_repeats: Optional[str]="inference",
169
+ token_prediction_rotary: bool=True,
170
+ token_prediction_rotary_theta: Optional[float]=50000,
171
+ **kwargs,
172
+ ):
173
+ super().__init__(**kwargs)
174
+ if vit_config is None:
175
+ self.vit_config = Molmo2VitConfig()
176
+ elif isinstance(vit_config, dict):
177
+ self.vit_config = Molmo2VitConfig(**vit_config)
178
+ else:
179
+ self.vit_config = vit_config
180
+ if adapter_config is None:
181
+ self.adapter_config = MolmoPointAdapterConfig()
182
+ elif isinstance(adapter_config, dict):
183
+ self.adapter_config = MolmoPointAdapterConfig(**adapter_config)
184
+ else:
185
+ self.adapter_config = adapter_config
186
+ if text_config is None:
187
+ self.text_config = Molmo2TextConfig()
188
+ elif isinstance(text_config, dict):
189
+ self.text_config = Molmo2TextConfig(**text_config)
190
+ else:
191
+ self.text_config = text_config
192
+ self.image_start_token_id = image_start_token_id
193
+ self.low_res_image_start_token_id = low_res_image_start_token_id
194
+ self.image_end_token_id = image_end_token_id
195
+ self.image_high_res_id = image_patch_id
196
+ self.image_non_indexable_patch_id = image_non_indexable_patch_id
197
+ self.image_patch_id = image_patch_id
198
+ self.image_col_id = image_col_id
199
+ self.frame_start_token_id = frame_start_token_id
200
+ self.frame_end_token_id = frame_end_token_id
201
+ self.patch_token_id = patch_token_id
202
+ self.subpatch_token_id = subpatch_token_id
203
+ self.location_token_id = location_token_id
204
+ self.use_frame_special_tokens = use_frame_special_tokens
205
+ self.initializer_range = initializer_range
206
+ self.patch_location = patch_location
207
+ self.no_more_points_class = no_more_points_class
208
+ self.patch_embed_dim = patch_embed_dim
209
+ self.patch_embedding_kind = patch_embedding_kind
210
+ self.embed_selected_vit_patch = embed_selected_vit_patch
211
+ self.embed_location = embed_location
212
+ self.layer_norm_x = layer_norm_x
213
+ self.norm_logits = norm_logits
214
+ self.mask_patches = mask_patches
215
+ self.mask_subpatches = mask_subpatches
216
+ self.mask_repeats = mask_repeats
217
+ self.token_prediction_rotary = token_prediction_rotary
218
+ self.token_prediction_rotary_theta = token_prediction_rotary_theta
219
+
220
+ @property
221
+ def image_num_patch(self):
222
+ assert self.vit_config is not None
223
+ return self.vit_config.image_num_patch
224
+
225
+ @property
226
+ def num_attention_heads(self):
227
+ return self.text_config.num_attention_heads
228
+
229
+ @property
230
+ def num_key_value_heads(self):
231
+ return self.text_config.num_key_value_heads
232
+
233
+ @property
234
+ def head_dim(self):
235
+ return self.text_config.head_dim
236
+
237
+ @property
238
+ def num_hidden_layers(self):
239
+ return self.text_config.num_hidden_layers
240
+
241
+ @property
242
+ def hidden_size(self):
243
+ return self.text_config.hidden_size
244
+
245
+ @property
246
+ def vocab_size(self):
247
+ return self.text_config.vocab_size
248
+
249
+ @property
250
+ def max_position_embeddings(self):
251
+ return self.text_config.max_position_embeddings
252
+
253
+
254
+ MolmoPointAdapterConfig.register_for_auto_class()
255
+ MolmoPointConfig.register_for_auto_class()
convert_molmo2_to_hf.py ADDED
@@ -0,0 +1,511 @@
1
+ import argparse
2
+ import os
3
+ import shutil
4
+ import logging
5
+ import json
6
+ import gc
7
+ from typing import Dict, Any, Optional
8
+
9
+ import torch
10
+ from transformers import GenerationConfig
11
+ from transformers.image_utils import (
12
+ PILImageResampling,
13
+ IMAGENET_STANDARD_MEAN,
14
+ IMAGENET_STANDARD_STD,
15
+ )
16
+
17
+ from olmo.models.molmo2.molmo2 import Molmo2Config as ModelConfig
18
+ from olmo.train.checkpointer import load_model_state
19
+ from olmo.util import (
20
+ prepare_cli_environment,
21
+ resource_path,
22
+ select_checkpoint
23
+ )
24
+
25
+ from .configuration_molmo2 import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2TextConfig
26
+ from .modeling_molmo2 import Molmo2ForConditionalGeneration
27
+ from .processing_molmo2 import Molmo2Processor
28
+ from .image_processing_molmo2 import Molmo2ImageProcessor
29
+ from .video_processing_molmo2 import Molmo2VideoProcessor
30
+
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+
35
+ CHAT_TEMPLATE = (
36
+ "{% set DEMO_STYLES = ["
37
+ "'point_count','pointing','cosyn_point','user_qa','long_caption','short_caption',"
38
+ "'video_long_caption','video_short_caption','video_point_track_per_frame',"
39
+ "'video_point_track_start_end','video_point_track_all_frames','video_single_point_track_start_end',"
40
+ "'video_transcript','video_clip_caption_start_end','video_clip_caption_start_end_in_seconds',"
41
+ "'video_clip_transcript_start_end','video_clip_transcript_start_end_in_seconds',"
42
+ "'video_frame_caption_timestamp','video_frame_caption_timestamp_in_seconds',"
43
+ "'correction_qa','text_sft','video_point','video_point_count','video_count','video_count_point',"
44
+ "'multi_image_pointing','multi_image_counting','multi_image_point_then_count','multi_image_count_then_point','demo',"
45
+ "'a_okvqa_mc','ai2_diagram_no_letter','ai2_diagram','science_qa',"
46
+ "'multi_image_mc','multi_image_mc_exp','mantis_instruct_mc',"
47
+ "'video_multiple_choice','video_multiple_choice_count_without_pointing',"
48
+ "'video_multiple_choice_multiple_correct','video_multiple_choice_w_subtitle'"
49
+ "] %}"
50
+
51
+ "{% set image_count = namespace(value=0) %}"
52
+ "{% set video_count = namespace(value=0) %}"
53
+
54
+ "{% set has_subtitle = messages and messages[0]['role'].lower() == 'subtitle' %}"
55
+
56
+ "{% for message in messages %}"
57
+ "{% if message['content'] is not string %}"
58
+ "{% for content in message['content'] %}"
59
+ "{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}"
60
+ "{% set image_count.value = image_count.value + 1 %}"
61
+ "{% elif content['type'] == 'video' or 'video' in content or 'video_url' in content %}"
62
+ "{% set video_count.value = video_count.value + 1 %}"
63
+ "{% endif %}"
64
+ "{% endfor %}"
65
+ "{% endif %}"
66
+ "{% endfor %}"
67
+
68
+ "{% if image_count.value == 1 %}"
69
+ "{{ '<|image|>' }}"
70
+ "{% elif image_count.value > 1 %}"
71
+ "{% for i in range(image_count.value) %}"
72
+ "{{ 'Image ' ~ (i + 1) ~ '<|image|>' }}"
73
+ "{% endfor %}"
74
+ "{% endif %}"
75
+
76
+ "{% for _ in range(video_count.value) %}"
77
+ "{{ '<|video|>' }}"
78
+ "{% endfor %}"
79
+
80
+ "{% if has_subtitle %}"
81
+ "{{ messages[0]['content'] }}"
82
+ "{% endif %}"
83
+
84
+ "{% for message in messages %}"
85
+ "{% set role = message['role'].lower() %}"
86
+
87
+ "{% if role == 'subtitle' %}"
88
+ "{% continue %}"
89
+ "{% endif %}"
90
+
91
+ "{% set conv_index = loop.index - (1 if has_subtitle else 0) %}"
92
+
93
+ "{%- if (conv_index % 2 == 1 and role != 'user') "
94
+ "or (conv_index % 2 == 0 and role != 'assistant') -%}"
95
+ "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
96
+ "{%- endif -%}"
97
+
98
+ "{% if message['content'] is string %}"
99
+ "{% set text_content = message['content'] %}"
100
+ "{% else %}"
101
+ "{% set m = namespace(text='') %}"
102
+ "{% for content in message['content'] %}"
103
+ "{% if content['type'] == 'text' %}"
104
+ "{% if content['style'] is defined and content['style'] not in DEMO_STYLES %}"
105
+ "{% set seg = content['style'] ~ ': ' ~ content['text'] %}"
106
+ "{% else %}"
107
+ "{% set seg = content['text'] %}"
108
+ "{% endif %}"
109
+ "{% set m.text = m.text ~ ('' if not m.text else ' ') ~ seg %}"
110
+ "{% endif %}"
111
+ "{% endfor %}"
112
+ "{% set text_content = m.text %}"
113
+ "{% endif %}"
114
+
115
+ "{% if role == 'user' %}"
116
+ "{% if not (has_subtitle and loop.index == 2) and not (not has_subtitle and loop.first) %}{{ '<|im_end|>\\n' }}{% endif %}"
117
+ "{{ '<|im_start|>user\\n' }}"
118
+ "{{ text_content }}"
119
+ "{{ '<|im_end|>\\n' }}"
120
+ "{% else %} {# assistant #}"
121
+ "{{ '<|im_start|>assistant\\n' }}"
122
+ "{{ text_content }}"
123
+ "{% endif %}"
124
+ "{% endfor %}"
125
+
126
+ "{% if add_generation_prompt %}"
127
+ "{{ '<|im_start|>assistant\\n' }}"
128
+ "{% endif %}"
129
+ )
130
+
131
+
132
+ def convert_config(
133
+ model_config: ModelConfig,
134
+ attn_implementation: str,
135
+ override_max_model_len: Optional[int],
136
+ ) -> Molmo2Config:
137
+ """Convert config to HF-compatible config"""
138
+ vision_backbone_cfg = model_config.vision_backbone
139
+ vit_config = vision_backbone_cfg.vit
140
+ llm_config = model_config.llm
141
+
142
+ molmo2_vit_config = Molmo2VitConfig(
143
+ hidden_size=vit_config.image_emb_dim,
144
+ intermediate_size=vit_config.image_mlp_dim,
145
+ num_hidden_layers=vit_config.image_num_layers,
146
+ num_attention_heads=vit_config.image_num_heads,
147
+ num_key_value_heads=vit_config.image_num_key_value_heads,
148
+ head_dim=vit_config.image_head_dim,
149
+ hidden_act=vit_config.image_mlp_activations,
150
+ layer_norm_eps=vit_config.image_norm_eps,
151
+ image_default_input_size=vit_config.image_default_input_size,
152
+ image_patch_size=vit_config.image_patch_size,
153
+ image_num_pos=vit_config.image_num_pos,
154
+ attention_dropout=0.0,
155
+ residual_dropout=0.0,
156
+ initializer_range=vit_config.initializer_range,
157
+ float32_attention=vit_config.float32_attention,
158
+ attn_implementation=attn_implementation,
159
+ )
160
+ adapter_hidden_act = "silu" if llm_config.activation_type == "swiglu" else llm_config.activation_type
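+ # Assumption: mlp_hidden_size counts both SwiGLU projections, so the HF intermediate_size is half of it.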
161
+ adapter_intermediate_size = (
162
+ llm_config.mlp_hidden_size if llm_config.mlp_hidden_size is not None
163
+ else llm_config.mlp_ratio * llm_config.d_model
164
+ ) // 2
165
+ molmo2_adapter_config = Molmo2AdapterConfig(
166
+ vit_layers=vision_backbone_cfg.vit_layers,
167
+ pooling_attention_mask=vision_backbone_cfg.pooling_attention_mask,
168
+ hidden_size=vit_config.image_emb_dim,
169
+ num_attention_heads=vit_config.image_num_heads,
170
+ num_key_value_heads=vit_config.image_num_key_value_heads,
171
+ head_dim=vit_config.image_head_dim,
172
+ float32_attention=vit_config.float32_attention,
173
+ attention_dropout=0.0,
174
+ residual_dropout=0.0,
175
+ hidden_act=adapter_hidden_act,
176
+ intermediate_size=adapter_intermediate_size,
177
+ text_hidden_size=llm_config.d_model,
178
+ image_feature_dropout=vision_backbone_cfg.image_feature_dropout,
179
+ initializer_range=llm_config.initializer_range,
180
+ attn_implementation=attn_implementation,
181
+ )
182
+ llm_head_dim = llm_config.d_model // llm_config.n_heads if llm_config.head_dim is None else llm_config.head_dim
183
+ llm_intermediate_size = (
184
+ llm_config.mlp_hidden_size if llm_config.mlp_hidden_size is not None
185
+ else llm_config.mlp_ratio * llm_config.d_model
186
+ ) // 2
187
+ llm_hidden_act = "silu" if llm_config.activation_type == "swiglu" else llm_config.activation_type
188
+ rope_scaling: Optional[Dict[str, Any]] = None
189
+ if llm_config.rope_type != "default":
190
+ rope_scaling = dict(rope_type=llm_config.rope_type)
191
+ for key in [
192
+ "rope_factor",
193
+ "rope_high_freq_factor",
194
+ "rope_low_freq_factor",
195
+ "rope_attention_factor",
196
+ "rope_original_max_position_embeddings",
197
+ "rope_beta_fast",
198
+ "rope_beta_slow",
199
+ "rope_mscale",
200
+ "rope_mscale_all_dim",
201
+ "rope_truncate",
202
+ ]:
203
+ if getattr(llm_config, key) is not None:
204
+ rope_scaling[key[len("rope_"):]] = getattr(llm_config, key)
205
+
206
+ max_position_embeddings = llm_config.max_position_embeddings or llm_config.max_sequence_length
207
+ if override_max_model_len is not None:
208
+ max_position_embeddings = override_max_model_len
209
+ rope_scaling_layers: list[int] | None = None
210
+ if llm_config.full_attention_layers is not None:
211
+ # HACK: The original Olmo3 applies scaling to full attention layers,
212
+ # while we apply scaling to sliding attention layers.
213
+ if llm_config.sliding_attention_rope_scaling:
214
+ rope_scaling_layers = [idx for idx in range(llm_config.n_layers) if idx not in llm_config.full_attention_layers]
215
+ else:
216
+ rope_scaling_layers = list(llm_config.full_attention_layers)
217
+ molmo2_text_config = Molmo2TextConfig(
218
+ hidden_size=llm_config.d_model,
219
+ num_attention_heads=llm_config.n_heads,
220
+ num_key_value_heads=llm_config.effective_n_kv_heads,
221
+ head_dim=llm_head_dim,
222
+ vocab_size=llm_config.embedding_size or llm_config.vocab_size,
223
+ additional_vocab_size=llm_config.additional_vocab_size,
224
+ qkv_bias=llm_config.qkv_bias,
225
+ num_hidden_layers=llm_config.n_layers,
226
+ intermediate_size=llm_intermediate_size,
227
+ hidden_act=llm_hidden_act,
228
+ embedding_dropout=0.0,
229
+ attention_dropout=0.0,
230
+ residual_dropout=0.0,
231
+ max_position_embeddings=max_position_embeddings,
232
+ rope_theta=llm_config.rope_theta,
233
+ rope_scaling=rope_scaling,
234
+ rope_scaling_layers=rope_scaling_layers,
235
+ use_qk_norm=llm_config.attention_layer_norm,
236
+ qk_norm_type=llm_config.attention_layer_norm_type,
237
+ layer_norm_eps=llm_config.layer_norm_eps,
238
+ norm_after=llm_config.norm_after,
239
+ initializer_range=llm_config.initializer_range,
240
+ attn_implementation=attn_implementation,
241
+ )
242
+
243
+ tokenizer = model_config.build_tokenizer()
244
+ image_start_token_id = tokenizer.image_start_token_id
245
+ image_end_token_id = tokenizer.image_end_token_id
246
+ low_res_image_start_token_id = tokenizer.low_res_image_start_token_id
247
+ image_low_res_id = tokenizer.image_low_res_token_id
248
+ image_patch_id = tokenizer.image_patch_token_id
249
+ image_col_id = tokenizer.image_col_token_id
250
+ frame_start_token_id = tokenizer.frame_start_token_id
251
+ frame_end_token_id = tokenizer.frame_end_token_id
252
+
253
+ use_frame_special_tokens = getattr(model_config.mm_preprocessor, "use_frame_special_tokens", False)
254
+
255
+ molmo2_config = Molmo2Config(
256
+ vit_config=molmo2_vit_config,
257
+ adapter_config=molmo2_adapter_config,
258
+ text_config=molmo2_text_config,
259
+ image_start_token_id=image_start_token_id,
260
+ low_res_image_start_token_id=low_res_image_start_token_id,
261
+ image_end_token_id=image_end_token_id,
262
+ image_low_res_id=image_low_res_id,
263
+ image_patch_id=image_patch_id,
264
+ image_col_id=image_col_id,
265
+ frame_start_token_id=frame_start_token_id,
266
+ frame_end_token_id=frame_end_token_id,
267
+ use_frame_special_tokens=use_frame_special_tokens,
268
+ initializer_range=llm_config.initializer_range,
269
+ use_cache=True,
270
+ tie_word_embeddings=False, # Always false for Molmo2
271
+ )
272
+ return molmo2_config
273
+
274
+
275
+ def convert_lm_head_and_prefix(
276
+ state_dict: dict[str, Any],
277
+ base_model_prefix: str,
278
+ weight_tying: bool
279
+ ) -> dict[str, Any]:
280
+ new_state_dict = {}
281
+ for key, val in state_dict.items():
282
+ if key == "transformer.ff_out.weight":
283
+ new_key = "lm_head.weight"
284
+ else:
285
+ new_key = f"{base_model_prefix}.{key}"
286
+ new_state_dict[new_key] = val
287
+
288
+ if weight_tying:
289
+ new_state_dict["lm_head.weight"] = state_dict["transformer.wte.embedding"]
290
+
291
+ return new_state_dict
292
+
293
+
294
+ def convert_molmo2(
295
+ state_dict: dict[str, Any],
296
+ config: Molmo2Config,
297
+ weight_tying: bool,
298
+ ) -> dict[str, Any]:
299
+ base_model_prefix = Molmo2ForConditionalGeneration.base_model_prefix
300
+ new_state_dict = convert_lm_head_and_prefix(state_dict, base_model_prefix, weight_tying)
301
+ model_prefix = f"{base_model_prefix}.transformer"
302
+ qkv_bias = config.qkv_bias if isinstance(config, Molmo2TextConfig) else config.text_config.qkv_bias
303
+ use_qk_norm = config.use_qk_norm if isinstance(config, Molmo2TextConfig) else config.text_config.use_qk_norm
304
+ for layer_i in range(config.num_hidden_layers):
305
+ prefix = f"{model_prefix}.blocks.{layer_i}"
306
+
307
+ move_to_attn = ["att_proj.weight", "attn_out.weight"]
308
+ if qkv_bias:
309
+ move_to_attn.append("att_proj.bias")
310
+ if use_qk_norm:
311
+ move_to_attn += ["q_norm.weight", "k_norm.weight"]
312
+
313
+ for k in move_to_attn:
314
+ assert f"{prefix}.self_attn.{k}" not in new_state_dict
315
+ new_state_dict[f"{prefix}.self_attn.{k}"] = new_state_dict.pop(f"{prefix}.{k}")
316
+
317
+ move_to_mlp = ["ff_proj.weight", "ff_out.weight"]
318
+ for k in move_to_mlp:
319
+ assert f"{prefix}.mlp.{k}" not in new_state_dict
320
+ new_state_dict[f"{prefix}.mlp.{k}"] = new_state_dict.pop(f"{prefix}.{k}")
321
+
322
+ return new_state_dict
323
+
324
+
325
+ def convert_model(
326
+ checkpoint_dir: str,
327
+ model_config: ModelConfig,
328
+ hf_config: Molmo2Config,
329
+ use_bfloat16: bool,
330
+ ) -> Molmo2ForConditionalGeneration:
331
+ """Convert model to HF-compatible model"""
332
+ with torch.device("meta"):
333
+ model = model_config.build_model()
334
+ hf_model = Molmo2ForConditionalGeneration(hf_config)
335
+ model.to_empty(device=torch.device("cpu"))
336
+ hf_model.to_empty(device=torch.device("cpu"))
337
+
338
+ load_model_state(checkpoint_dir, model)
339
+ model.eval()
340
+ model = model.to(torch.float32)
341
+ state_dict = model.state_dict()
342
+
343
+ new_state_dict = convert_molmo2(state_dict, hf_config, model_config.llm.weight_tying)
344
+ hf_model.eval()
345
+ hf_model = hf_model.to(torch.bfloat16 if use_bfloat16 else torch.float32)
346
+ hf_model.load_state_dict(new_state_dict)
347
+ return hf_model
348
+
349
+
350
+ def save(
351
+ checkpoint_dir: str,
352
+ output_dir: str,
353
+ use_bfloat16: bool,
354
+ attn_implementation: str,
355
+ override_max_model_len: Optional[int],
356
+ ) -> None:
357
+ logger.info(f"Loading model config from {checkpoint_dir}")
358
+ config_path = resource_path(select_checkpoint(checkpoint_dir), "config.yaml")
359
+ model_config: ModelConfig = ModelConfig.load(config_path, key="model", validate_paths=False)
360
+
361
+ hf_config = convert_config(model_config, attn_implementation, override_max_model_len)
362
+
363
+ logger.info(f"Save HF-compatible model config and checkpoint to {output_dir}")
365
+ hf_model = convert_model(checkpoint_dir, model_config, hf_config, use_bfloat16)
366
+
367
+ hf_model.save_pretrained(output_dir)
368
+
369
+ gc.collect()
370
+
371
+ model_file = os.path.join(output_dir, "modeling_molmo2.py")
372
+ if not os.path.exists(model_file):
373
+ logger.warning(f"Copying model file to {model_file} manually")
374
+ shutil.copyfile(
375
+ "olmo/hf_model/modeling_molmo2.py",
376
+ model_file,
377
+ )
378
+
379
+ with open(os.path.join(output_dir, "config.json")) as f:
380
+ config = json.load(f)
381
+
382
+ auto_map = config.get("auto_map", None)
383
+ if auto_map is None:
384
+ auto_map = {}
385
+ if "AutoModelForImageTextToText" not in auto_map:
386
+ logger.warning("Add AutoModelForImageTextToText to auto_map")
387
+ auto_map["AutoModelForImageTextToText"] = "modeling_molmo2.Molmo2ForConditionalGeneration"
388
+ with open(os.path.join(output_dir, "config.json"), "w") as f:
389
+ json.dump(config, f, indent=2)
390
+
391
+ tokenizer = model_config.build_tokenizer().tokenizer
392
+ if not tokenizer.bos_token:
393
+ tokenizer.bos_token = tokenizer.eos_token
394
+ tokenizer.bos_token_id = tokenizer.eos_token_id
395
+ tokenizer.padding_side = "left"
396
+
397
+ tokenizer.chat_template = CHAT_TEMPLATE
398
+
399
+ logger.info(f"Save tokenizer and processor to {output_dir}")
400
+
401
+ mm_cfg = model_config.mm_preprocessor
402
+ vit_cfg = model_config.vision_backbone.vit
403
+
404
+ img_cfg = mm_cfg.image
405
+ video_cfg = mm_cfg.video
406
+
407
+ assert vit_cfg.resize_mode == "siglip", "Only siglip resize is supported for now"
408
+ assert vit_cfg.normalize == "siglip", "Only siglip normalization is supported for now"
409
+ assert img_cfg.crop_mode == "overlap-and-resize-c2", "Only overlap-and-resize-c2 crop mode is supported for now"
410
+ assert img_cfg.max_crops == img_cfg.max_multi_image_crops, "max_crops and max_multi_image_crops must be the same"
411
+ assert img_cfg.pooling_w == img_cfg.multi_image_pooling_w, "pooling_w and multi_image_pooling_w must be the same"
412
+ assert img_cfg.pooling_h == img_cfg.multi_image_pooling_h, "pooling_h and multi_image_pooling_h must be the same"
413
+
414
+ image_processor = Molmo2ImageProcessor(
415
+ size={"height": vit_cfg.image_default_input_size[0], "width": vit_cfg.image_default_input_size[1]},
416
+ resample=PILImageResampling.BILINEAR,
417
+ image_mean=IMAGENET_STANDARD_MEAN,
418
+ image_std=IMAGENET_STANDARD_STD,
419
+ do_convert_rgb=True,
420
+ max_crops=img_cfg.max_crops,
421
+ overlap_margins=img_cfg.overlap_margins,
422
+ patch_size=vit_cfg.image_patch_size,
423
+ pooling_size=[img_cfg.pooling_h, img_cfg.pooling_w],
424
+ )
425
+
426
+ image_use_col_tokens = img_cfg.use_col_tokens
427
+ use_single_crop_col_tokens = img_cfg.use_single_crop_col_tokens
428
+ use_single_crop_start_token = img_cfg.use_single_crop_start_token
429
+
430
+ assert vit_cfg.resize_mode == "siglip", "Only siglip resize is supported for now"
431
+ assert vit_cfg.normalize == "siglip", "Only siglip normalization is supported for now"
432
+ assert video_cfg.time_mode == "per-frame-compact", "Only per-frame-compact time mode is supported for now"
433
+
434
+ max_fps = video_cfg.max_fps
435
+ if isinstance(max_fps, (tuple, list)):
436
+ assert len(max_fps) == 1, "Only one max_fps is supported for now"
437
+ max_fps = max_fps[0]
438
+ video_processor = Molmo2VideoProcessor(
439
+ size={"height": vit_cfg.image_default_input_size[0], "width": vit_cfg.image_default_input_size[1]},
440
+ resample=PILImageResampling.BILINEAR,
441
+ image_mean=IMAGENET_STANDARD_MEAN,
442
+ image_std=IMAGENET_STANDARD_STD,
443
+ do_convert_rgb=True,
444
+ patch_size=vit_cfg.image_patch_size,
445
+ pooling_size=[video_cfg.pooling_h, video_cfg.pooling_w],
446
+ frame_sample_mode=video_cfg.frame_sample_mode,
447
+ num_frames=video_cfg.max_frames,
448
+ max_fps=max_fps,
449
+ sampling_fps=2,
450
+ )
451
+
452
+ video_use_col_tokens = False
453
+ use_frame_special_tokens = video_cfg.use_frame_special_tokens
454
+
455
+ processor = Molmo2Processor(
456
+ image_processor,
457
+ video_processor,
458
+ tokenizer,
459
+ chat_template=CHAT_TEMPLATE,
460
+ image_use_col_tokens=image_use_col_tokens,
461
+ use_single_crop_col_tokens=use_single_crop_col_tokens,
462
+ use_single_crop_start_token=use_single_crop_start_token,
463
+ video_use_col_tokens=video_use_col_tokens,
464
+ use_frame_special_tokens=use_frame_special_tokens,
465
+ )
466
+ processor.audio_tokenizer = None
467
+ processor.save_pretrained(output_dir)
468
+
469
+ logger.info(f"Save generation config to {output_dir}")
470
+ generation_config = GenerationConfig(
471
+ bos_token_id=tokenizer.bos_token_id,
472
+ eos_token_id=tokenizer.eos_token_id,
473
+ pad_token_id=tokenizer.pad_token_id,
474
+ )
475
+ generation_config.save_pretrained(output_dir)
476
+
477
+ del hf_model, processor, tokenizer, generation_config
478
+ gc.collect()
479
+
480
+
481
+ def main():
482
+ parser = argparse.ArgumentParser(
483
+ description="Convert Molmo checkpoint to HuggingFace format."
484
+ )
485
+ parser.add_argument("checkpoint_dir", help="Location of Molmo2 checkpoint.")
486
+ parser.add_argument("output_dir", help="Location to save the converted checkpoint.", default="./hf-ckpt")
487
+ parser.add_argument("--use_bfloat16", action="store_true", help="Use bfloat16 weights")
488
+ parser.add_argument(
489
+ "--attn_implementation", type=str, default="sdpa", help="Attention type",
490
+ choices=["eager", "sdpa", "flash_attention_2"],
491
+ )
492
+ parser.add_argument(
493
+ "--override_max_model_len",
494
+ type=int,
495
+ default=None,
496
+ help="Override the max model length",
497
+ )
498
+ args = parser.parse_args()
499
+ prepare_cli_environment()
500
+
501
+ save(
502
+ args.checkpoint_dir,
503
+ args.output_dir,
504
+ args.use_bfloat16,
505
+ args.attn_implementation,
506
+ args.override_max_model_len,
507
+ )
508
+
509
+
510
+ if __name__ == "__main__":
511
+ main()
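As a quick check after conversion, the saved directory can be loaded back through the Auto classes. The sketch below is illustrative and not part of the upload: it assumes the converter's default output directory (./hf-ckpt) and that the saved config carries auto_map entries for these classes; the exact Auto model class for Molmo2 may differ, so AutoModelForCausalLM here is only an assumption.

from transformers import AutoModelForCausalLM, AutoProcessor

ckpt_dir = "./hf-ckpt"  # hypothetical path; matches the converter's default output_dir

# trust_remote_code=True is required because the Molmo2 processor/model code
# ships inside the checkpoint directory itself.
processor = AutoProcessor.from_pretrained(ckpt_dir, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(ckpt_dir, trust_remote_code=True, torch_dtype="auto")
print(type(processor).__name__, type(model).__name__)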
generation_config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "bos_token_id": 151645,
3
+ "eos_token_id": 151645,
4
+ "pad_token_id": 151643,
5
+ "transformers_version": "4.57.1"
6
+ }
image_processing_molmo2.py ADDED
@@ -0,0 +1,535 @@
1
+ """Image processor class for Molmo2"""
2
+ from typing import Optional, Union
3
+ import numpy as np
4
+ import einops
5
+ import torch
6
+ import torchvision.transforms
7
+
8
+ from transformers.image_utils import (
9
+ IMAGENET_STANDARD_MEAN,
10
+ IMAGENET_STANDARD_STD,
11
+ ImageInput,
12
+ PILImageResampling,
13
+ make_flat_list_of_images,
14
+ valid_images,
15
+ to_numpy_array,
16
+ )
17
+ from transformers.image_transforms import convert_to_rgb
18
+ from transformers.processing_utils import ImagesKwargs
19
+ from transformers.image_processing_utils import BaseImageProcessor, get_size_dict
20
+ from transformers.utils import logging
21
+ from transformers.feature_extraction_utils import BatchFeature
22
+ from transformers.utils import TensorType
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ def normalize_image(
29
+ image: np.ndarray,
30
+ image_mean: list[float],
31
+ image_std: list[float],
32
+ ) -> np.ndarray:
33
+ image -= np.array(image_mean, dtype=np.float32)[None, None, :]
34
+ image /= np.array(image_std, dtype=np.float32)[None, None, :]
35
+ return image
36
+
37
+
38
+ def resize_image(
39
+ image: np.ndarray,
40
+ desired_output_size: list[int],
41
+ resample: PILImageResampling,
42
+ ) -> np.ndarray:
43
+ image = torch.permute(torch.from_numpy(image), [2, 0, 1])
44
+ dtype = image.dtype
45
+ if torch.is_floating_point(image):
46
+ in_min = 0.0
47
+ in_max = 1.0
48
+ resized = torchvision.transforms.Resize(
49
+ desired_output_size,
50
+ resample,
51
+ antialias=False,
52
+ )(image)
53
+ resized = torch.clip(resized, 0.0, 1.0).to(dtype)
54
+ else:
55
+ assert image.dtype == torch.uint8, "SigLIP expects float images or uint8 images, but got {}".format(image.dtype)
56
+ in_min = 0.0
57
+ in_max = 255.0
58
+ resized = torchvision.transforms.Resize(
59
+ desired_output_size,
60
+ resample,
61
+ antialias=False,
62
+ )(image)
63
+ resized = torch.clip(resized, 0, 255).to(dtype)
64
+
65
+ resized = resized.to(torch.float32)
66
+ resized = (resized - in_min) / (in_max - in_min)
67
+
68
+ resized = torch.permute(resized, [1, 2, 0]).numpy()
69
+
70
+ return resized
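An illustrative call (not part of the file; the import path image_processing_molmo2 is assumed): regardless of whether the input is uint8 or float, the output is a float32 array rescaled to [0, 1], which is what normalize_image above expects.

import numpy as np
from transformers.image_utils import PILImageResampling
from image_processing_molmo2 import resize_image  # assumed import path

rgb = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
resized = resize_image(rgb, [378, 378], PILImageResampling.BILINEAR)
print(resized.shape, resized.dtype)  # (378, 378, 3) float32, values in [0, 1]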
71
+
72
+
73
+ def select_tiling(h, w, patch_size, max_num_crops):
74
+ """Divide in image of size [w, h] in up to max_num_patches of size patch_size"""
75
+ original_size = np.stack([h, w]) # [1, 2]
76
+ original_res = h * w
77
+ tilings = []
78
+ for i in range(1, max_num_crops + 1):
79
+ for j in range(1, max_num_crops + 1):
80
+ if i*j <= max_num_crops:
81
+ tilings.append((i, j))
82
+ # sort so argmin and argmax favour smaller tilings in the event of a tie
83
+ tilings.sort(key=lambda x: (x[0]*x[1], x[0]))
84
+ candidate_tilings = np.array(tilings, dtype=np.int32) # [n_resolutions, 2]
85
+ candidate_resolutions = candidate_tilings * patch_size # [n_resolutions, 2]
86
+
87
+ # How much we would need to scale the image to fit exactly in each tiling
88
+ original_size = np.stack([h, w], dtype=np.float32) # [1, 2]
89
+
90
+ # The original size can be zero in rare cases if the image is smaller than the margin
91
+ # In those cases letting the scale become infinite means the tiling is based on the
92
+ # other side, or falls back to the smallest tiling
93
+ with np.errstate(divide='ignore'):
94
+ required_scale_d = candidate_resolutions.astype(np.float32) / original_size
95
+ required_scale = np.min(required_scale_d, axis=-1, keepdims=True) # [n_resolutions, 1]
96
+ if np.all(required_scale < 1):
97
+ # We are forced to downscale, so try to minimize the amount of downscaling
98
+ ix = np.argmax(required_scale)
99
+ else:
100
+ # Pick the resolution that required the least upscaling so that it most closely fits the image
101
+ required_scale = np.where(required_scale < 1.0, 10e9, required_scale)
102
+ ix = np.argmin(required_scale)
103
+ return candidate_tilings[ix]
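A small worked example of the selection rule (illustrative, assumed import path): with a 336-pixel crop window and a budget of 8 crops, a 672x672 image fits a 2x2 tiling with no scaling, so that tiling is chosen over layouts that would force up- or down-scaling; a tall 1344x336 image gets a 4x1 tiling instead.

from image_processing_molmo2 import select_tiling  # assumed import path

print(select_tiling(672, 672, 336, 8))    # expected [2 2]: exact fit, smallest such tiling
print(select_tiling(1344, 336, 336, 8))   # expected [4 1]: more rows for a tall image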
104
+
105
+
106
+ def build_resized_image(
107
+ image: np.ndarray,
108
+ base_image_input_size: list[int],
109
+ resample: PILImageResampling,
110
+ image_mean: list[float],
111
+ image_std: list[float],
112
+ image_patch_size: int,
113
+ ) -> tuple[np.ndarray, np.ndarray]:
114
+ resized = resize_image(
115
+ image, base_image_input_size, resample,
116
+ )
117
+ resized = normalize_image(resized, image_mean, image_std)
118
+ if len(resized.shape) == 3:
119
+ resized = np.expand_dims(resized, 0)
120
+ crop_patch_w = base_image_input_size[1] // image_patch_size
121
+ crop_patch_h = base_image_input_size[0] // image_patch_size
122
+ resize_idx = np.arange(crop_patch_w*crop_patch_h).reshape([crop_patch_h, crop_patch_w])
123
+ return resized, resize_idx
124
+
125
+
126
+ def build_overlapping_crops(
127
+ image: np.ndarray,
128
+ max_crops: int,
129
+ overlap_margins: list[int],
130
+ base_image_input_size: list[int],
131
+ resample: PILImageResampling,
132
+ image_mean: list[float],
133
+ image_std: list[float],
134
+ image_patch_size: int,
135
+ ) -> tuple[np.ndarray, np.ndarray]:
136
+ """Decompose an image into a set of overlapping crops
137
+
138
+ :return crop_arr: [n_crops, h, w, 3] The crops
139
+ :return patch_idx: [overlap_patch_h, overlap_patch_w] For each patch in the resized image
140
+ the crops were extracted from, what patch in `crop_arr` it corresponds to
141
+ """
142
+ original_image_h, original_image_w = image.shape[:2]
143
+ crop_size = base_image_input_size[0]
144
+ assert base_image_input_size[0] == base_image_input_size[1]
145
+
146
+ left_margin, right_margin = overlap_margins
147
+ total_margin_pixels = image_patch_size * (right_margin + left_margin) # pixels removed per dim
148
+ crop_patches = base_image_input_size[0] // image_patch_size # patches per crop dim
149
+ crop_window_patches = crop_patches - (right_margin + left_margin) # usable patches
150
+ crop_window_size = crop_window_patches * image_patch_size
151
+ crop_patch_w = base_image_input_size[1] // image_patch_size
152
+ crop_patch_h = base_image_input_size[0] // image_patch_size
153
+ original_image_h, original_image_w = image.shape[:2]
154
+ crop_size = base_image_input_size[0]
155
+
156
+ # Decide how to tile the image, to account for the overlap margins we compute the tiling
157
+ # as if we had an image without the margins and were using a crop size without the margins
158
+ tiling = select_tiling(
159
+ original_image_h - total_margin_pixels,
160
+ original_image_w - total_margin_pixels,
161
+ crop_window_size,
162
+ max_crops,
163
+ )
164
+
165
+ src = resize_image(
166
+ image,
167
+ [tiling[0]*crop_window_size+total_margin_pixels, tiling[1]*crop_window_size+total_margin_pixels],
168
+ resample,
169
+ )
170
+ src = normalize_image(src, image_mean, image_std)
171
+
172
+ # Now we have to split the image into crops, and track what patches came from
173
+ # where in `patch_idx_arr`
174
+ n_crops = tiling[0] * tiling[1]
175
+ crop_arr = np.zeros([n_crops, crop_size, crop_size, 3], dtype=src.dtype)
176
+ patch_idx_arr = np.zeros([n_crops, crop_patch_h, crop_patch_w], dtype=np.int32)
177
+ on_crop = 0
178
+ for i in range(tiling[0]):
179
+ # Slide over `src` by `crop_window_size` steps, but extract crops of size `crops_size`
180
+ # which results in overlapping crop windows
181
+ y0 = i*crop_window_size
182
+ for j in range(tiling[1]):
183
+ x0 = j*crop_window_size
184
+ crop_arr[on_crop] = src[y0:y0+crop_size, x0:x0+crop_size]
185
+ patch_idx = np.arange(crop_patch_w*crop_patch_h).reshape(crop_patch_h, crop_patch_w)
186
+ patch_idx += on_crop * crop_patch_h * crop_patch_w
187
+
188
+ # Mask out idx that are in the overlap region
189
+ if i != 0:
190
+ patch_idx[:left_margin, :] = -1
191
+ if j != 0:
192
+ patch_idx[:, :left_margin] = -1
193
+ if i != tiling[0]-1:
194
+ patch_idx[-right_margin:, :] = -1
195
+ if j != tiling[1]-1:
196
+ patch_idx[:, -right_margin:] = -1
197
+ patch_idx_arr[on_crop] = patch_idx
198
+ on_crop += 1
199
+
200
+ # `patch_idx_arr` is ordered crop-by-crop, here we transpose `patch_idx_arr`
201
+ # so it is in left-to-right order
202
+ patch_idx_arr = np.reshape(
203
+ patch_idx_arr,
204
+ [tiling[0], tiling[1], crop_patch_h, crop_patch_w]
205
+ )
206
+ patch_idx_arr = np.transpose(patch_idx_arr, [0, 2, 1, 3])
207
+ patch_idx_arr = np.reshape(patch_idx_arr, [-1])
208
+
209
+ # Now get the parts not in the overlap region, so it should map each patch in `src`
210
+ # to the correct patch it should come from in `crop_arr`
211
+ patch_idx_arr = patch_idx_arr[patch_idx_arr >= 0].reshape(
212
+ src.shape[0]//image_patch_size,
213
+ src.shape[1]//image_patch_size,
214
+ )
215
+ return crop_arr, patch_idx_arr
216
+
217
+
218
+ def batch_pixels_to_patches(array: np.ndarray, patch_size: int) -> np.ndarray:
219
+ """Reshape images of [n_images, h, w, 3] -> [n_images, n_patches, pixels_per_patch]"""
220
+ if len(array.shape) == 3:
221
+ n_crops, h, w = array.shape
222
+ h_patches = h//patch_size
223
+ w_patches = w//patch_size
224
+ array = np.reshape(array, [n_crops, h_patches, patch_size, w_patches, patch_size])
225
+ array = np.transpose(array, [0, 1, 3, 2, 4])
226
+ array = np.reshape(array, [n_crops, h_patches*w_patches, patch_size*patch_size])
227
+ return array
228
+ else:
229
+ n_crops, h, w, c = array.shape
230
+ h_patches = h//patch_size
231
+ w_patches = w//patch_size
232
+ array = np.reshape(array, [n_crops, h_patches, patch_size, w_patches, patch_size, c])
233
+ array = np.transpose(array, [0, 1, 3, 2, 4, 5])
234
+ array = np.reshape(array, [n_crops, h_patches*w_patches, patch_size*patch_size*c])
235
+ return array
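The reshape is easiest to read as a shape transformation; a minimal illustration on synthetic data (assumed import path):

import numpy as np
from image_processing_molmo2 import batch_pixels_to_patches  # assumed import path

crops = np.zeros([5, 378, 378, 3], dtype=np.float32)  # 5 RGB crops of 378x378
patches = batch_pixels_to_patches(crops, 14)
print(patches.shape)  # (5, 729, 588): 27*27 patches per crop, 14*14*3 pixels per patch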
236
+
237
+
238
+ def arange_for_pooling(
239
+ idx_arr: np.ndarray,
240
+ pool_h: int,
241
+ pool_w: int,
242
+ ) -> np.ndarray:
243
+ h_pad = pool_h * ((idx_arr.shape[0] + pool_h - 1) // pool_h) - idx_arr.shape[0]
244
+ w_pad = pool_w * ((idx_arr.shape[1] + pool_w - 1) // pool_w) - idx_arr.shape[1]
245
+ idx_arr = np.pad(idx_arr, [[h_pad//2, (h_pad+1)//2], [w_pad//2, (w_pad+1)//2]],
246
+ mode='constant',constant_values=-1)
247
+ return einops.rearrange(
248
+ idx_arr, "(h dh) (w dw) -> h w (dh dw)", dh=pool_h, dw=pool_w)
249
+
250
+
251
+ def image_to_patches_and_grids(
252
+ image: np.ndarray,
253
+ max_crops: int,
254
+ overlap_margins: list[int],
255
+ base_image_input_size: list[int],
256
+ resample: PILImageResampling,
257
+ image_mean: list[float],
258
+ image_std: list[float],
259
+ image_patch_size: int,
260
+ image_pooling_w: int,
261
+ image_pooling_h: int,
262
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
263
+ """
264
+ :return image_grids, the shape of each (low-res, high-res) image after pooling
265
+ :return crops, the image crops to processes with the ViT
266
+ :return pooled_patch_idx, for each patch_id tokens in `image_tokens`, the indices of the
267
+ patches in `crops` to pool for that token, masked with -1
268
+ :return patch_idx_arr, maps patch coordinates to patch ids
269
+ """
270
+ if isinstance(base_image_input_size, int):
271
+ base_image_input_size = (base_image_input_size, base_image_input_size)
272
+
273
+ base_image_input_d = image_patch_size
274
+ pooling_w = image_pooling_w
275
+ pooling_h = image_pooling_h
276
+ crop_patch_w = base_image_input_size[1] // base_image_input_d
277
+ crop_patch_h = base_image_input_size[0] // base_image_input_d
278
+
279
+ crop_arr, patch_idx_arr = build_overlapping_crops(
280
+ image,
281
+ max_crops,
282
+ overlap_margins,
283
+ base_image_input_size,
284
+ resample,
285
+ image_mean,
286
+ image_std,
287
+ image_patch_size,
288
+ )
289
+ pooling_idx = arange_for_pooling(patch_idx_arr, pooling_h, pooling_w)
290
+ h, w = pooling_idx.shape[:2]
291
+ pooling_idx = pooling_idx.reshape([-1, pooling_h*pooling_w])
292
+
293
+ # Finally do the same for the global image
294
+ resized, resize_idx = build_resized_image(
295
+ image,
296
+ base_image_input_size,
297
+ resample,
298
+ image_mean,
299
+ image_std,
300
+ image_patch_size,
301
+ )
302
+ patch_idx_arr += crop_patch_h*crop_patch_w
303
+ crop_arr = np.concatenate([resized, crop_arr], 0)
304
+
305
+ resize_idx = arange_for_pooling(resize_idx, pooling_h, pooling_w)
306
+ resized_h, resized_w = resize_idx.shape[:2]
307
+ resize_idx = resize_idx.reshape([-1, pooling_h*pooling_w])
308
+
309
+ # Global image goes first, so the order of patches in previous crops gets increased
310
+ pooling_idx = np.where(
311
+ pooling_idx >= 0,
312
+ pooling_idx + crop_patch_h*crop_patch_w,
313
+ -1
314
+ )
315
+ pooling_idx = np.concatenate([resize_idx, pooling_idx])
316
+ image_grid = [np.array([resized_h, resized_w, h, w])]
317
+
318
+ return (
319
+ np.stack(image_grid, 0),
320
+ batch_pixels_to_patches(crop_arr, image_patch_size),
321
+ pooling_idx,
322
+ patch_idx_arr
323
+ )
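Putting the pieces together, an end-to-end sketch of this helper on a synthetic image (assumed import path); the number of crops in the output depends on the tiling chosen for the input resolution.

import numpy as np
from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling
from image_processing_molmo2 import image_to_patches_and_grids  # assumed import path

image = np.random.randint(0, 256, size=(600, 900, 3), dtype=np.uint8)
grids, patches, pooling_idx, patch_idx = image_to_patches_and_grids(
    image,
    max_crops=8,
    overlap_margins=[4, 4],
    base_image_input_size=[378, 378],
    resample=PILImageResampling.BILINEAR,
    image_mean=IMAGENET_STANDARD_MEAN,
    image_std=IMAGENET_STANDARD_STD,
    image_patch_size=14,
    image_pooling_w=2,
    image_pooling_h=2,
)
print(grids)          # [[low_res_h, low_res_w, high_res_h, high_res_w]] in pooled-token units
print(patches.shape)  # (1 global crop + n tiled crops, 729, 588)
print(pooling_idx.shape, patch_idx.shape)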
324
+
325
+
326
+ class Molmo2ImagesKwargs(ImagesKwargs, total=False):
327
+ max_crops: Optional[int]
328
+ overlap_margins: Optional[list[int]]
329
+ patch_size: Optional[int]
330
+ pooling_size: Optional[list[int]]
331
+
332
+
333
+ class Molmo2ImageProcessor(BaseImageProcessor):
334
+ r"""
335
+ Constructs a Molmo2 image processor that preprocesses images for the model.
336
+
337
+ Args:
338
+ size (`dict[str, int]`, *optional*, defaults to `{"height": 378, "width": 378}`):
339
+ Size of the image after resizing.
340
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
341
+ Resampling filter to use when resizing the image.
342
+ image_mean (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
343
+ Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
344
+ image_std (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
345
+ Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
346
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
347
+ Whether to convert the image to RGB.
348
+ max_crops (`int`, *optional*, defaults to `8`):
349
+ Maximum number of crops to use per image.
350
+ overlap_margins (`list[int]`, *optional*, defaults to `[4, 4]`):
351
+ Overlap margins to use.
352
+ patch_size (`int`, *optional*, defaults to 14):
353
+ The spatial patch size of the vision encoder.
354
+ pooling_size (`list[int]`, *optional*, defaults to `[2, 2]`):
355
+ The pooling size of the vision adapter.
356
+ """
357
+
358
+ model_input_names = ["pixel_values", "image_token_pooling", "image_grids", "image_num_crops"]
359
+
360
+ def __init__(
361
+ self,
362
+ size: Optional[dict[str, int]] = None,
363
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
364
+ image_mean: Optional[Union[float, list[float]]] = None,
365
+ image_std: Optional[Union[float, list[float]]] = None,
366
+ do_convert_rgb: bool = True,
367
+ max_crops: int = 8,
368
+ overlap_margins: list[int] = [4, 4],
369
+ patch_size: int = 14,
370
+ pooling_size: list[int] = [2, 2],
371
+ **kwargs,
372
+ ) -> None:
373
+ super().__init__(**kwargs)
374
+ size = size if size is not None else {"height": 378, "width": 378}
375
+ size = get_size_dict(size, default_to_square=True)
376
+ self.size = size
377
+
378
+ self.resample = resample
379
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
380
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
381
+ self.do_convert_rgb = do_convert_rgb
382
+
383
+ self.max_crops = max_crops
384
+ self.overlap_margins = overlap_margins
385
+ self.patch_size = patch_size
386
+ self.pooling_size = pooling_size
387
+
388
+ def preprocess(
389
+ self,
390
+ images: ImageInput,
391
+ size: Optional[dict[str, int]] = None,
392
+ resample: Optional[PILImageResampling] = None,
393
+ image_mean: Optional[Union[float, list[float]]] = None,
394
+ image_std: Optional[Union[float, list[float]]] = None,
395
+ do_convert_rgb: Optional[bool] = None,
396
+ max_crops: Optional[int] = None,
397
+ overlap_margins: Optional[list[int]] = None,
398
+ patch_size: Optional[int] = None,
399
+ pooling_size: Optional[list[int]] = None,
400
+ return_tensors: Optional[Union[str, TensorType]] = None,
401
+ return_pointing_metadata: bool = False,
402
+ **kwargs,
403
+ ) -> BatchFeature:
404
+ """
405
+ Args:
406
+ images (`ImageInput`):
407
+ Image to preprocess.
408
+ size (`dict[str, int]`, *optional*, defaults to `self.size`):
409
+ Size of the image after resizing.
410
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
411
+ Resampling filter to use when resizing the image. This can be one of the enum `PILImageResampling`. Only
412
+ has an effect if `do_resize` is set to `True`.
413
+ image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
414
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
415
+ image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
416
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
417
+ `True`.
418
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
419
+ Whether to convert the image to RGB.
420
+ max_crops (`int`, *optional*, defaults to `self.max_crops`):
421
+ Maximum number of crops to use per image.
422
+ overlap_margins (`list[int]`, *optional*, defaults to `self.overlap_margins`):
423
+ Overlap margins to use.
424
+ patch_size (`int`, *optional*, defaults to `self.patch_size`):
425
+ The spatial patch size of the vision encoder.
426
+ pooling_size (`list[int]`, *optional*, defaults to `self.pooling_size`):
427
+ The pooling size of the vision adapter.
428
+ return_tensors (`str` or `TensorType`, *optional*):
429
+ The type of tensors to return. Can be one of:
430
+ - Unset: Return a list of `np.ndarray`.
431
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
432
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
433
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
434
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
435
+ return_pointing_metadata (`bool`, *optional*, defaults to `False`):
436
+ Whether to return patch mappings used for decoding MolmoPoint points
437
+
438
+ Returns:
439
+ A `BatchFeature` containing the following keys:
440
+ - `pixel_values`: The preprocessed images.
441
+ - `image_token_pooling`: The indices of the patches in `crops` to pool for each token in `image_tokens`.
442
+ - `image_grids`: The image grids.
443
+ - `image_num_crops`: The number of crops for each image.
444
+ """
445
+ if size is not None:
446
+ if "height" not in size or "width" not in size:
447
+ raise ValueError("size must contain 'height' and 'width' keys.")
448
+ else:
449
+ size = {**self.size}
450
+
451
+ base_image_input_size = [size["height"], size["width"]]
452
+
453
+ resample = resample or self.resample
454
+ image_mean = image_mean or self.image_mean
455
+ image_std = image_std or self.image_std
456
+ do_convert_rgb = do_convert_rgb or self.do_convert_rgb
457
+
458
+ max_crops = max_crops or self.max_crops
459
+ overlap_margins = overlap_margins or self.overlap_margins
460
+ patch_size = patch_size or self.patch_size
461
+ pooling_size = pooling_size or self.pooling_size
462
+
463
+ image_pooling_h, image_pooling_w = pooling_size
464
+
465
+ if images is not None:
466
+ images = self.fetch_images(images)
467
+ images = make_flat_list_of_images(images)
468
+
469
+ if images is not None and not valid_images(images):
470
+ raise ValueError(
471
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
472
+ "torch.Tensor, tf.Tensor or jax.ndarray."
473
+ )
474
+
475
+ if do_convert_rgb:
476
+ images = [convert_to_rgb(image) for image in images]
477
+
478
+ # All transformations expect numpy arrays.
479
+ images = [to_numpy_array(image) for image in images]
480
+
481
+ data = {}
482
+ patch_mappings = []
483
+ absolute_token_pooling = []
484
+ offset = 0
485
+ if images is not None:
486
+ batch_grids = []
487
+ batch_crops = []
488
+ batch_pooled_patches_idx = []
489
+ batch_num_crops = []
490
+
491
+ for image in images:
492
+ image_grid, crops, pooled_idx, patch_mapping = image_to_patches_and_grids(
493
+ image,
494
+ max_crops,
495
+ overlap_margins,
496
+ base_image_input_size,
497
+ resample,
498
+ image_mean,
499
+ image_std,
500
+ patch_size,
501
+ image_pooling_w,
502
+ image_pooling_h,
503
+ )
504
+ batch_grids.append(image_grid)
505
+ batch_crops.append(crops)
506
+ batch_pooled_patches_idx.append(pooled_idx)
507
+ batch_num_crops.append(crops.shape[0])
508
+ if return_pointing_metadata:
509
+ absolute_token_pooling.append(
510
+ np.where(pooled_idx >= 0, pooled_idx + offset, -1))
511
+ patch_mappings.append(patch_mapping + offset)
512
+ n_patches = np.prod(crops.shape[:2])
513
+ offset += n_patches
514
+
515
+ pixel_values = np.concatenate(batch_crops, 0)
516
+ image_token_pooling = np.concatenate(batch_pooled_patches_idx, 0)
517
+ image_grids = np.concatenate(batch_grids, 0)
518
+ image_num_crops = np.array(batch_num_crops)
519
+
520
+ data.update(
521
+ pixel_values=pixel_values,
522
+ image_token_pooling=image_token_pooling,
523
+ image_grids=image_grids,
524
+ image_num_crops=image_num_crops,
525
+ )
526
+
527
+ data = BatchFeature(data, tensor_type=return_tensors)
528
+ if return_pointing_metadata:
529
+ data["image_token_pooling_np"] = np.concatenate(absolute_token_pooling, 0) if len(images) else None
530
+ data["subpatch_mapping"] = patch_mappings
531
+ data["image_sizes"] = [x.shape[:2][::-1] for x in images]
532
+ return data
533
+
534
+
535
+ Molmo2ImageProcessor.register_for_auto_class()
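For reference, a short standalone sketch of how the converter's arguments map onto this processor and its saved config (assumed import path; the numeric values are the class defaults documented above):

from image_processing_molmo2 import Molmo2ImageProcessor  # assumed import path

image_processor = Molmo2ImageProcessor(
    size={"height": 378, "width": 378},
    max_crops=8,
    overlap_margins=[4, 4],
    patch_size=14,
    pooling_size=[2, 2],
)
print(image_processor.model_input_names)
image_processor.save_pretrained("./hf-ckpt")  # writes preprocessor_config.json alongside the weights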
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca2efe3c1c4a515f7a3b3131a92f9c59b91e547d78666cdfdafdc8c9855be5dd
3
+ size 4891799000
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18ea780da606f668723ea74e0dcdebf10c6475972a5af0772aa32824383e545a
3
+ size 4844690992
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0f241b51e633bf23b2d19a97ad4628e9eecc10935f4e0fd0fcdf3315ff401c5
3
+ size 4844691024
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:266b0e351332d41297997783720fa2c32cf829cd8801e2c428f0833f48e80c12
3
+ size 4867859988
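The four entries above are Git LFS pointer files rather than the shards themselves; each records the sha256 and byte size of the stored blob. A minimal sketch for checking a downloaded shard against its pointer (local filename assumed):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# oid copied from the model-00001-of-00004.safetensors pointer above
expected = "ca2efe3c1c4a515f7a3b3131a92f9c59b91e547d78666cdfdafdc8c9855be5dd"
print(sha256_of("model-00001-of-00004.safetensors") == expected)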
model.safetensors.index.json ADDED
@@ -0,0 +1,729 @@
1
+ {
2
+ "metadata": {
3
+ "total_parameters": 4862237657,
4
+ "total_size": 19448950628
5
+ },
6
+ "weight_map": {
7
+ "lm_head.new_output_embeddings": "model-00004-of-00004.safetensors",
8
+ "lm_head.output_embeddings": "model-00004-of-00004.safetensors",
9
+ "model.build_vit_embedding.bias": "model-00004-of-00004.safetensors",
10
+ "model.build_vit_embedding.weight": "model-00004-of-00004.safetensors",
11
+ "model.connector.image_pooling_2d.wk.bias": "model-00004-of-00004.safetensors",
12
+ "model.connector.image_pooling_2d.wk.weight": "model-00004-of-00004.safetensors",
13
+ "model.connector.image_pooling_2d.wo.bias": "model-00004-of-00004.safetensors",
14
+ "model.connector.image_pooling_2d.wo.weight": "model-00004-of-00004.safetensors",
15
+ "model.connector.image_pooling_2d.wq.bias": "model-00004-of-00004.safetensors",
16
+ "model.connector.image_pooling_2d.wq.weight": "model-00004-of-00004.safetensors",
17
+ "model.connector.image_pooling_2d.wv.bias": "model-00004-of-00004.safetensors",
18
+ "model.connector.image_pooling_2d.wv.weight": "model-00004-of-00004.safetensors",
19
+ "model.connector.image_projector.w1.weight": "model-00004-of-00004.safetensors",
20
+ "model.connector.image_projector.w2.weight": "model-00004-of-00004.safetensors",
21
+ "model.connector.image_projector.w3.weight": "model-00004-of-00004.safetensors",
22
+ "model.point_predictor.add_no_point_class_embed.vector": "model-00004-of-00004.safetensors",
23
+ "model.point_predictor.patch_k.bias": "model-00004-of-00004.safetensors",
24
+ "model.point_predictor.patch_k.weight": "model-00004-of-00004.safetensors",
25
+ "model.point_predictor.patch_q.bias": "model-00004-of-00004.safetensors",
26
+ "model.point_predictor.patch_q.weight": "model-00004-of-00004.safetensors",
27
+ "model.point_predictor.subpatch_k.bias": "model-00004-of-00004.safetensors",
28
+ "model.point_predictor.subpatch_k.weight": "model-00004-of-00004.safetensors",
29
+ "model.point_predictor.subpatch_loc_k.bias": "model-00004-of-00004.safetensors",
30
+ "model.point_predictor.subpatch_loc_k.weight": "model-00004-of-00004.safetensors",
31
+ "model.point_predictor.subpatch_q.bias": "model-00004-of-00004.safetensors",
32
+ "model.point_predictor.subpatch_q.weight": "model-00004-of-00004.safetensors",
33
+ "model.point_predictor.x_norm.weight": "model-00004-of-00004.safetensors",
34
+ "model.transformer.blocks.0.attn_norm.weight": "model-00001-of-00004.safetensors",
35
+ "model.transformer.blocks.0.ff_norm.weight": "model-00001-of-00004.safetensors",
36
+ "model.transformer.blocks.0.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
37
+ "model.transformer.blocks.0.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
38
+ "model.transformer.blocks.0.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
39
+ "model.transformer.blocks.0.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
40
+ "model.transformer.blocks.0.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
41
+ "model.transformer.blocks.0.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
42
+ "model.transformer.blocks.1.attn_norm.weight": "model-00001-of-00004.safetensors",
43
+ "model.transformer.blocks.1.ff_norm.weight": "model-00001-of-00004.safetensors",
44
+ "model.transformer.blocks.1.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
45
+ "model.transformer.blocks.1.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
46
+ "model.transformer.blocks.1.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
47
+ "model.transformer.blocks.1.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
48
+ "model.transformer.blocks.1.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
49
+ "model.transformer.blocks.1.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
50
+ "model.transformer.blocks.10.attn_norm.weight": "model-00002-of-00004.safetensors",
51
+ "model.transformer.blocks.10.ff_norm.weight": "model-00002-of-00004.safetensors",
52
+ "model.transformer.blocks.10.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
53
+ "model.transformer.blocks.10.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
54
+ "model.transformer.blocks.10.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
55
+ "model.transformer.blocks.10.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
56
+ "model.transformer.blocks.10.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
57
+ "model.transformer.blocks.10.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
58
+ "model.transformer.blocks.11.attn_norm.weight": "model-00002-of-00004.safetensors",
59
+ "model.transformer.blocks.11.ff_norm.weight": "model-00002-of-00004.safetensors",
60
+ "model.transformer.blocks.11.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
61
+ "model.transformer.blocks.11.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
62
+ "model.transformer.blocks.11.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
63
+ "model.transformer.blocks.11.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
64
+ "model.transformer.blocks.11.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
65
+ "model.transformer.blocks.11.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
66
+ "model.transformer.blocks.12.attn_norm.weight": "model-00002-of-00004.safetensors",
67
+ "model.transformer.blocks.12.ff_norm.weight": "model-00002-of-00004.safetensors",
68
+ "model.transformer.blocks.12.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
69
+ "model.transformer.blocks.12.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
70
+ "model.transformer.blocks.12.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
71
+ "model.transformer.blocks.12.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
72
+ "model.transformer.blocks.12.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
73
+ "model.transformer.blocks.12.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
74
+ "model.transformer.blocks.13.attn_norm.weight": "model-00002-of-00004.safetensors",
75
+ "model.transformer.blocks.13.ff_norm.weight": "model-00002-of-00004.safetensors",
76
+ "model.transformer.blocks.13.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
77
+ "model.transformer.blocks.13.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
78
+ "model.transformer.blocks.13.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
79
+ "model.transformer.blocks.13.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
80
+ "model.transformer.blocks.13.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
81
+ "model.transformer.blocks.13.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
82
+ "model.transformer.blocks.14.attn_norm.weight": "model-00002-of-00004.safetensors",
83
+ "model.transformer.blocks.14.ff_norm.weight": "model-00002-of-00004.safetensors",
84
+ "model.transformer.blocks.14.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
85
+ "model.transformer.blocks.14.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
86
+ "model.transformer.blocks.14.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
87
+ "model.transformer.blocks.14.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
88
+ "model.transformer.blocks.14.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
89
+ "model.transformer.blocks.14.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
90
+ "model.transformer.blocks.15.attn_norm.weight": "model-00002-of-00004.safetensors",
91
+ "model.transformer.blocks.15.ff_norm.weight": "model-00002-of-00004.safetensors",
92
+ "model.transformer.blocks.15.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
93
+ "model.transformer.blocks.15.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
94
+ "model.transformer.blocks.15.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
95
+ "model.transformer.blocks.15.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
96
+ "model.transformer.blocks.15.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
97
+ "model.transformer.blocks.15.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
98
+ "model.transformer.blocks.16.attn_norm.weight": "model-00002-of-00004.safetensors",
99
+ "model.transformer.blocks.16.ff_norm.weight": "model-00002-of-00004.safetensors",
100
+ "model.transformer.blocks.16.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
101
+ "model.transformer.blocks.16.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
102
+ "model.transformer.blocks.16.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
103
+ "model.transformer.blocks.16.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
104
+ "model.transformer.blocks.16.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
105
+ "model.transformer.blocks.16.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
106
+ "model.transformer.blocks.17.attn_norm.weight": "model-00002-of-00004.safetensors",
107
+ "model.transformer.blocks.17.ff_norm.weight": "model-00002-of-00004.safetensors",
108
+ "model.transformer.blocks.17.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
109
+ "model.transformer.blocks.17.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
110
+ "model.transformer.blocks.17.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
111
+ "model.transformer.blocks.17.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
112
+ "model.transformer.blocks.17.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
113
+ "model.transformer.blocks.17.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
114
+ "model.transformer.blocks.18.attn_norm.weight": "model-00002-of-00004.safetensors",
115
+ "model.transformer.blocks.18.ff_norm.weight": "model-00002-of-00004.safetensors",
116
+ "model.transformer.blocks.18.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
117
+ "model.transformer.blocks.18.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
118
+ "model.transformer.blocks.18.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
119
+ "model.transformer.blocks.18.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
120
+ "model.transformer.blocks.18.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
121
+ "model.transformer.blocks.18.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
122
+ "model.transformer.blocks.19.attn_norm.weight": "model-00002-of-00004.safetensors",
123
+ "model.transformer.blocks.19.ff_norm.weight": "model-00002-of-00004.safetensors",
124
+ "model.transformer.blocks.19.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
125
+ "model.transformer.blocks.19.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
126
+ "model.transformer.blocks.19.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
127
+ "model.transformer.blocks.19.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
128
+ "model.transformer.blocks.19.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
129
+ "model.transformer.blocks.19.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
130
+ "model.transformer.blocks.2.attn_norm.weight": "model-00001-of-00004.safetensors",
131
+ "model.transformer.blocks.2.ff_norm.weight": "model-00001-of-00004.safetensors",
132
+ "model.transformer.blocks.2.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
133
+ "model.transformer.blocks.2.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
134
+ "model.transformer.blocks.2.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
135
+ "model.transformer.blocks.2.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
136
+ "model.transformer.blocks.2.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
137
+ "model.transformer.blocks.2.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
138
+ "model.transformer.blocks.20.attn_norm.weight": "model-00002-of-00004.safetensors",
139
+ "model.transformer.blocks.20.ff_norm.weight": "model-00003-of-00004.safetensors",
140
+ "model.transformer.blocks.20.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
141
+ "model.transformer.blocks.20.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
142
+ "model.transformer.blocks.20.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
143
+ "model.transformer.blocks.20.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
144
+ "model.transformer.blocks.20.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
145
+ "model.transformer.blocks.20.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
146
+ "model.transformer.blocks.21.attn_norm.weight": "model-00003-of-00004.safetensors",
147
+ "model.transformer.blocks.21.ff_norm.weight": "model-00003-of-00004.safetensors",
148
+ "model.transformer.blocks.21.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
149
+ "model.transformer.blocks.21.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
150
+ "model.transformer.blocks.21.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
151
+ "model.transformer.blocks.21.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
152
+ "model.transformer.blocks.21.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
153
+ "model.transformer.blocks.21.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
154
+ "model.transformer.blocks.22.attn_norm.weight": "model-00003-of-00004.safetensors",
155
+ "model.transformer.blocks.22.ff_norm.weight": "model-00003-of-00004.safetensors",
156
+ "model.transformer.blocks.22.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
157
+ "model.transformer.blocks.22.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
158
+ "model.transformer.blocks.22.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
159
+ "model.transformer.blocks.22.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
160
+ "model.transformer.blocks.22.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
161
+ "model.transformer.blocks.22.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
162
+ "model.transformer.blocks.23.attn_norm.weight": "model-00003-of-00004.safetensors",
163
+ "model.transformer.blocks.23.ff_norm.weight": "model-00003-of-00004.safetensors",
164
+ "model.transformer.blocks.23.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
165
+ "model.transformer.blocks.23.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
166
+ "model.transformer.blocks.23.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
167
+ "model.transformer.blocks.23.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
168
+ "model.transformer.blocks.23.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
169
+ "model.transformer.blocks.23.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
170
+ "model.transformer.blocks.24.attn_norm.weight": "model-00003-of-00004.safetensors",
171
+ "model.transformer.blocks.24.ff_norm.weight": "model-00003-of-00004.safetensors",
172
+ "model.transformer.blocks.24.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
173
+ "model.transformer.blocks.24.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
174
+ "model.transformer.blocks.24.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
175
+ "model.transformer.blocks.24.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
176
+ "model.transformer.blocks.24.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
177
+ "model.transformer.blocks.24.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
178
+ "model.transformer.blocks.25.attn_norm.weight": "model-00003-of-00004.safetensors",
179
+ "model.transformer.blocks.25.ff_norm.weight": "model-00003-of-00004.safetensors",
180
+ "model.transformer.blocks.25.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
181
+ "model.transformer.blocks.25.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
182
+ "model.transformer.blocks.25.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
183
+ "model.transformer.blocks.25.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
184
+ "model.transformer.blocks.25.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
185
+ "model.transformer.blocks.25.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
186
+ "model.transformer.blocks.26.attn_norm.weight": "model-00003-of-00004.safetensors",
187
+ "model.transformer.blocks.26.ff_norm.weight": "model-00003-of-00004.safetensors",
188
+ "model.transformer.blocks.26.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
189
+ "model.transformer.blocks.26.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
190
+ "model.transformer.blocks.26.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
191
+ "model.transformer.blocks.26.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
192
+ "model.transformer.blocks.26.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
193
+ "model.transformer.blocks.26.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
194
+ "model.transformer.blocks.27.attn_norm.weight": "model-00003-of-00004.safetensors",
195
+ "model.transformer.blocks.27.ff_norm.weight": "model-00003-of-00004.safetensors",
196
+ "model.transformer.blocks.27.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
197
+ "model.transformer.blocks.27.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
198
+ "model.transformer.blocks.27.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
199
+ "model.transformer.blocks.27.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
200
+ "model.transformer.blocks.27.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
201
+ "model.transformer.blocks.27.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
202
+ "model.transformer.blocks.28.attn_norm.weight": "model-00003-of-00004.safetensors",
203
+ "model.transformer.blocks.28.ff_norm.weight": "model-00003-of-00004.safetensors",
204
+ "model.transformer.blocks.28.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
205
+ "model.transformer.blocks.28.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
206
+ "model.transformer.blocks.28.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
207
+ "model.transformer.blocks.28.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
208
+ "model.transformer.blocks.28.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
209
+ "model.transformer.blocks.28.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
210
+ "model.transformer.blocks.29.attn_norm.weight": "model-00003-of-00004.safetensors",
211
+ "model.transformer.blocks.29.ff_norm.weight": "model-00003-of-00004.safetensors",
212
+ "model.transformer.blocks.29.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
213
+ "model.transformer.blocks.29.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
214
+ "model.transformer.blocks.29.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
215
+ "model.transformer.blocks.29.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
216
+ "model.transformer.blocks.29.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
217
+ "model.transformer.blocks.29.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
218
+ "model.transformer.blocks.3.attn_norm.weight": "model-00001-of-00004.safetensors",
219
+ "model.transformer.blocks.3.ff_norm.weight": "model-00001-of-00004.safetensors",
220
+ "model.transformer.blocks.3.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
221
+ "model.transformer.blocks.3.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
222
+ "model.transformer.blocks.3.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
223
+ "model.transformer.blocks.3.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
224
+ "model.transformer.blocks.3.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
225
+ "model.transformer.blocks.3.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
226
+ "model.transformer.blocks.30.attn_norm.weight": "model-00003-of-00004.safetensors",
227
+ "model.transformer.blocks.30.ff_norm.weight": "model-00003-of-00004.safetensors",
228
+ "model.transformer.blocks.30.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
229
+ "model.transformer.blocks.30.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
230
+ "model.transformer.blocks.30.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
231
+ "model.transformer.blocks.30.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
232
+ "model.transformer.blocks.30.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
233
+ "model.transformer.blocks.30.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
234
+ "model.transformer.blocks.31.attn_norm.weight": "model-00003-of-00004.safetensors",
235
+ "model.transformer.blocks.31.ff_norm.weight": "model-00003-of-00004.safetensors",
236
+ "model.transformer.blocks.31.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
237
+ "model.transformer.blocks.31.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
238
+ "model.transformer.blocks.31.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
239
+ "model.transformer.blocks.31.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
240
+ "model.transformer.blocks.31.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
241
+ "model.transformer.blocks.31.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
242
+ "model.transformer.blocks.32.attn_norm.weight": "model-00003-of-00004.safetensors",
243
+ "model.transformer.blocks.32.ff_norm.weight": "model-00004-of-00004.safetensors",
244
+ "model.transformer.blocks.32.mlp.ff_out.weight": "model-00004-of-00004.safetensors",
245
+ "model.transformer.blocks.32.mlp.ff_proj.weight": "model-00004-of-00004.safetensors",
246
+ "model.transformer.blocks.32.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
247
+ "model.transformer.blocks.32.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
248
+ "model.transformer.blocks.32.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
249
+ "model.transformer.blocks.32.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
250
+ "model.transformer.blocks.33.attn_norm.weight": "model-00004-of-00004.safetensors",
251
+ "model.transformer.blocks.33.ff_norm.weight": "model-00004-of-00004.safetensors",
252
+ "model.transformer.blocks.33.mlp.ff_out.weight": "model-00004-of-00004.safetensors",
253
+ "model.transformer.blocks.33.mlp.ff_proj.weight": "model-00004-of-00004.safetensors",
254
+ "model.transformer.blocks.33.self_attn.att_proj.weight": "model-00004-of-00004.safetensors",
255
+ "model.transformer.blocks.33.self_attn.attn_out.weight": "model-00004-of-00004.safetensors",
256
+ "model.transformer.blocks.33.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
257
+ "model.transformer.blocks.33.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
258
+ "model.transformer.blocks.34.attn_norm.weight": "model-00004-of-00004.safetensors",
259
+ "model.transformer.blocks.34.ff_norm.weight": "model-00004-of-00004.safetensors",
260
+ "model.transformer.blocks.34.mlp.ff_out.weight": "model-00004-of-00004.safetensors",
261
+ "model.transformer.blocks.34.mlp.ff_proj.weight": "model-00004-of-00004.safetensors",
262
+ "model.transformer.blocks.34.self_attn.att_proj.weight": "model-00004-of-00004.safetensors",
263
+ "model.transformer.blocks.34.self_attn.attn_out.weight": "model-00004-of-00004.safetensors",
264
+ "model.transformer.blocks.34.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
265
+ "model.transformer.blocks.34.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
266
+ "model.transformer.blocks.35.attn_norm.weight": "model-00004-of-00004.safetensors",
267
+ "model.transformer.blocks.35.ff_norm.weight": "model-00004-of-00004.safetensors",
268
+ "model.transformer.blocks.35.mlp.ff_out.weight": "model-00004-of-00004.safetensors",
269
+ "model.transformer.blocks.35.mlp.ff_proj.weight": "model-00004-of-00004.safetensors",
270
+ "model.transformer.blocks.35.self_attn.att_proj.weight": "model-00004-of-00004.safetensors",
271
+ "model.transformer.blocks.35.self_attn.attn_out.weight": "model-00004-of-00004.safetensors",
272
+ "model.transformer.blocks.35.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
273
+ "model.transformer.blocks.35.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
274
+ "model.transformer.blocks.4.attn_norm.weight": "model-00001-of-00004.safetensors",
275
+ "model.transformer.blocks.4.ff_norm.weight": "model-00001-of-00004.safetensors",
276
+ "model.transformer.blocks.4.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
277
+ "model.transformer.blocks.4.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
278
+ "model.transformer.blocks.4.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
279
+ "model.transformer.blocks.4.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
280
+ "model.transformer.blocks.4.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
281
+ "model.transformer.blocks.4.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
282
+ "model.transformer.blocks.5.attn_norm.weight": "model-00001-of-00004.safetensors",
283
+ "model.transformer.blocks.5.ff_norm.weight": "model-00001-of-00004.safetensors",
284
+ "model.transformer.blocks.5.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
285
+ "model.transformer.blocks.5.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
286
+ "model.transformer.blocks.5.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
287
+ "model.transformer.blocks.5.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
288
+ "model.transformer.blocks.5.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
289
+ "model.transformer.blocks.5.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
290
+ "model.transformer.blocks.6.attn_norm.weight": "model-00001-of-00004.safetensors",
291
+ "model.transformer.blocks.6.ff_norm.weight": "model-00001-of-00004.safetensors",
292
+ "model.transformer.blocks.6.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
293
+ "model.transformer.blocks.6.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
294
+ "model.transformer.blocks.6.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
295
+ "model.transformer.blocks.6.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
296
+ "model.transformer.blocks.6.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
297
+ "model.transformer.blocks.6.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
298
+ "model.transformer.blocks.7.attn_norm.weight": "model-00001-of-00004.safetensors",
299
+ "model.transformer.blocks.7.ff_norm.weight": "model-00001-of-00004.safetensors",
300
+ "model.transformer.blocks.7.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
301
+ "model.transformer.blocks.7.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
302
+ "model.transformer.blocks.7.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
303
+ "model.transformer.blocks.7.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
304
+ "model.transformer.blocks.7.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
305
+ "model.transformer.blocks.7.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
306
+ "model.transformer.blocks.8.attn_norm.weight": "model-00001-of-00004.safetensors",
307
+ "model.transformer.blocks.8.ff_norm.weight": "model-00002-of-00004.safetensors",
308
+ "model.transformer.blocks.8.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
309
+ "model.transformer.blocks.8.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
310
+ "model.transformer.blocks.8.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
311
+ "model.transformer.blocks.8.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
312
+ "model.transformer.blocks.8.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
313
+ "model.transformer.blocks.8.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
314
+ "model.transformer.blocks.9.attn_norm.weight": "model-00002-of-00004.safetensors",
315
+ "model.transformer.blocks.9.ff_norm.weight": "model-00002-of-00004.safetensors",
316
+ "model.transformer.blocks.9.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
317
+ "model.transformer.blocks.9.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
318
+ "model.transformer.blocks.9.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
319
+ "model.transformer.blocks.9.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
320
+ "model.transformer.blocks.9.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
321
+ "model.transformer.blocks.9.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
322
+ "model.transformer.ln_f.weight": "model-00004-of-00004.safetensors",
323
+ "model.transformer.wte.embedding": "model-00001-of-00004.safetensors",
324
+ "model.transformer.wte.new_embedding": "model-00001-of-00004.safetensors",
325
+ "model.vit.patch_embedding.bias": "model-00004-of-00004.safetensors",
326
+ "model.vit.patch_embedding.weight": "model-00004-of-00004.safetensors",
327
+ "model.vit.positional_embedding": "model-00004-of-00004.safetensors",
328
+ "model.vit.transformer.resblocks.0.attention.wk.bias": "model-00004-of-00004.safetensors",
329
+ "model.vit.transformer.resblocks.0.attention.wk.weight": "model-00004-of-00004.safetensors",
330
+ "model.vit.transformer.resblocks.0.attention.wo.bias": "model-00004-of-00004.safetensors",
331
+ "model.vit.transformer.resblocks.0.attention.wo.weight": "model-00004-of-00004.safetensors",
332
+ "model.vit.transformer.resblocks.0.attention.wq.bias": "model-00004-of-00004.safetensors",
333
+ "model.vit.transformer.resblocks.0.attention.wq.weight": "model-00004-of-00004.safetensors",
334
+ "model.vit.transformer.resblocks.0.attention.wv.bias": "model-00004-of-00004.safetensors",
335
+ "model.vit.transformer.resblocks.0.attention.wv.weight": "model-00004-of-00004.safetensors",
336
+ "model.vit.transformer.resblocks.0.attention_norm.bias": "model-00004-of-00004.safetensors",
337
+ "model.vit.transformer.resblocks.0.attention_norm.weight": "model-00004-of-00004.safetensors",
338
+ "model.vit.transformer.resblocks.0.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
339
+ "model.vit.transformer.resblocks.0.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
340
+ "model.vit.transformer.resblocks.0.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
341
+ "model.vit.transformer.resblocks.0.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
342
+ "model.vit.transformer.resblocks.0.ffn_norm.bias": "model-00004-of-00004.safetensors",
343
+ "model.vit.transformer.resblocks.0.ffn_norm.weight": "model-00004-of-00004.safetensors",
344
+ "model.vit.transformer.resblocks.1.attention.wk.bias": "model-00004-of-00004.safetensors",
345
+ "model.vit.transformer.resblocks.1.attention.wk.weight": "model-00004-of-00004.safetensors",
346
+ "model.vit.transformer.resblocks.1.attention.wo.bias": "model-00004-of-00004.safetensors",
347
+ "model.vit.transformer.resblocks.1.attention.wo.weight": "model-00004-of-00004.safetensors",
348
+ "model.vit.transformer.resblocks.1.attention.wq.bias": "model-00004-of-00004.safetensors",
349
+ "model.vit.transformer.resblocks.1.attention.wq.weight": "model-00004-of-00004.safetensors",
350
+ "model.vit.transformer.resblocks.1.attention.wv.bias": "model-00004-of-00004.safetensors",
351
+ "model.vit.transformer.resblocks.1.attention.wv.weight": "model-00004-of-00004.safetensors",
352
+ "model.vit.transformer.resblocks.1.attention_norm.bias": "model-00004-of-00004.safetensors",
353
+ "model.vit.transformer.resblocks.1.attention_norm.weight": "model-00004-of-00004.safetensors",
354
+ "model.vit.transformer.resblocks.1.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
355
+ "model.vit.transformer.resblocks.1.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
356
+ "model.vit.transformer.resblocks.1.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
357
+ "model.vit.transformer.resblocks.1.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
358
+ "model.vit.transformer.resblocks.1.ffn_norm.bias": "model-00004-of-00004.safetensors",
359
+ "model.vit.transformer.resblocks.1.ffn_norm.weight": "model-00004-of-00004.safetensors",
360
+ "model.vit.transformer.resblocks.10.attention.wk.bias": "model-00004-of-00004.safetensors",
361
+ "model.vit.transformer.resblocks.10.attention.wk.weight": "model-00004-of-00004.safetensors",
362
+ "model.vit.transformer.resblocks.10.attention.wo.bias": "model-00004-of-00004.safetensors",
363
+ "model.vit.transformer.resblocks.10.attention.wo.weight": "model-00004-of-00004.safetensors",
364
+ "model.vit.transformer.resblocks.10.attention.wq.bias": "model-00004-of-00004.safetensors",
365
+ "model.vit.transformer.resblocks.10.attention.wq.weight": "model-00004-of-00004.safetensors",
366
+ "model.vit.transformer.resblocks.10.attention.wv.bias": "model-00004-of-00004.safetensors",
367
+ "model.vit.transformer.resblocks.10.attention.wv.weight": "model-00004-of-00004.safetensors",
368
+ "model.vit.transformer.resblocks.10.attention_norm.bias": "model-00004-of-00004.safetensors",
369
+ "model.vit.transformer.resblocks.10.attention_norm.weight": "model-00004-of-00004.safetensors",
370
+ "model.vit.transformer.resblocks.10.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
371
+ "model.vit.transformer.resblocks.10.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
372
+ "model.vit.transformer.resblocks.10.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
373
+ "model.vit.transformer.resblocks.10.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
374
+ "model.vit.transformer.resblocks.10.ffn_norm.bias": "model-00004-of-00004.safetensors",
375
+ "model.vit.transformer.resblocks.10.ffn_norm.weight": "model-00004-of-00004.safetensors",
376
+ "model.vit.transformer.resblocks.11.attention.wk.bias": "model-00004-of-00004.safetensors",
377
+ "model.vit.transformer.resblocks.11.attention.wk.weight": "model-00004-of-00004.safetensors",
378
+ "model.vit.transformer.resblocks.11.attention.wo.bias": "model-00004-of-00004.safetensors",
379
+ "model.vit.transformer.resblocks.11.attention.wo.weight": "model-00004-of-00004.safetensors",
380
+ "model.vit.transformer.resblocks.11.attention.wq.bias": "model-00004-of-00004.safetensors",
381
+ "model.vit.transformer.resblocks.11.attention.wq.weight": "model-00004-of-00004.safetensors",
382
+ "model.vit.transformer.resblocks.11.attention.wv.bias": "model-00004-of-00004.safetensors",
383
+ "model.vit.transformer.resblocks.11.attention.wv.weight": "model-00004-of-00004.safetensors",
384
+ "model.vit.transformer.resblocks.11.attention_norm.bias": "model-00004-of-00004.safetensors",
385
+ "model.vit.transformer.resblocks.11.attention_norm.weight": "model-00004-of-00004.safetensors",
386
+ "model.vit.transformer.resblocks.11.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
387
+ "model.vit.transformer.resblocks.11.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
388
+ "model.vit.transformer.resblocks.11.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
389
+ "model.vit.transformer.resblocks.11.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
390
+ "model.vit.transformer.resblocks.11.ffn_norm.bias": "model-00004-of-00004.safetensors",
391
+ "model.vit.transformer.resblocks.11.ffn_norm.weight": "model-00004-of-00004.safetensors",
392
+ "model.vit.transformer.resblocks.12.attention.wk.bias": "model-00004-of-00004.safetensors",
393
+ "model.vit.transformer.resblocks.12.attention.wk.weight": "model-00004-of-00004.safetensors",
394
+ "model.vit.transformer.resblocks.12.attention.wo.bias": "model-00004-of-00004.safetensors",
395
+ "model.vit.transformer.resblocks.12.attention.wo.weight": "model-00004-of-00004.safetensors",
396
+ "model.vit.transformer.resblocks.12.attention.wq.bias": "model-00004-of-00004.safetensors",
397
+ "model.vit.transformer.resblocks.12.attention.wq.weight": "model-00004-of-00004.safetensors",
398
+ "model.vit.transformer.resblocks.12.attention.wv.bias": "model-00004-of-00004.safetensors",
399
+ "model.vit.transformer.resblocks.12.attention.wv.weight": "model-00004-of-00004.safetensors",
400
+ "model.vit.transformer.resblocks.12.attention_norm.bias": "model-00004-of-00004.safetensors",
401
+ "model.vit.transformer.resblocks.12.attention_norm.weight": "model-00004-of-00004.safetensors",
402
+ "model.vit.transformer.resblocks.12.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
403
+ "model.vit.transformer.resblocks.12.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
404
+ "model.vit.transformer.resblocks.12.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
405
+ "model.vit.transformer.resblocks.12.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
406
+ "model.vit.transformer.resblocks.12.ffn_norm.bias": "model-00004-of-00004.safetensors",
407
+ "model.vit.transformer.resblocks.12.ffn_norm.weight": "model-00004-of-00004.safetensors",
408
+ "model.vit.transformer.resblocks.13.attention.wk.bias": "model-00004-of-00004.safetensors",
409
+ "model.vit.transformer.resblocks.13.attention.wk.weight": "model-00004-of-00004.safetensors",
410
+ "model.vit.transformer.resblocks.13.attention.wo.bias": "model-00004-of-00004.safetensors",
411
+ "model.vit.transformer.resblocks.13.attention.wo.weight": "model-00004-of-00004.safetensors",
412
+ "model.vit.transformer.resblocks.13.attention.wq.bias": "model-00004-of-00004.safetensors",
413
+ "model.vit.transformer.resblocks.13.attention.wq.weight": "model-00004-of-00004.safetensors",
414
+ "model.vit.transformer.resblocks.13.attention.wv.bias": "model-00004-of-00004.safetensors",
415
+ "model.vit.transformer.resblocks.13.attention.wv.weight": "model-00004-of-00004.safetensors",
416
+ "model.vit.transformer.resblocks.13.attention_norm.bias": "model-00004-of-00004.safetensors",
417
+ "model.vit.transformer.resblocks.13.attention_norm.weight": "model-00004-of-00004.safetensors",
418
+ "model.vit.transformer.resblocks.13.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
419
+ "model.vit.transformer.resblocks.13.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
420
+ "model.vit.transformer.resblocks.13.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
421
+ "model.vit.transformer.resblocks.13.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
422
+ "model.vit.transformer.resblocks.13.ffn_norm.bias": "model-00004-of-00004.safetensors",
423
+ "model.vit.transformer.resblocks.13.ffn_norm.weight": "model-00004-of-00004.safetensors",
424
+ "model.vit.transformer.resblocks.14.attention.wk.bias": "model-00004-of-00004.safetensors",
425
+ "model.vit.transformer.resblocks.14.attention.wk.weight": "model-00004-of-00004.safetensors",
426
+ "model.vit.transformer.resblocks.14.attention.wo.bias": "model-00004-of-00004.safetensors",
427
+ "model.vit.transformer.resblocks.14.attention.wo.weight": "model-00004-of-00004.safetensors",
428
+ "model.vit.transformer.resblocks.14.attention.wq.bias": "model-00004-of-00004.safetensors",
429
+ "model.vit.transformer.resblocks.14.attention.wq.weight": "model-00004-of-00004.safetensors",
430
+ "model.vit.transformer.resblocks.14.attention.wv.bias": "model-00004-of-00004.safetensors",
431
+ "model.vit.transformer.resblocks.14.attention.wv.weight": "model-00004-of-00004.safetensors",
432
+ "model.vit.transformer.resblocks.14.attention_norm.bias": "model-00004-of-00004.safetensors",
433
+ "model.vit.transformer.resblocks.14.attention_norm.weight": "model-00004-of-00004.safetensors",
434
+ "model.vit.transformer.resblocks.14.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
435
+ "model.vit.transformer.resblocks.14.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
436
+ "model.vit.transformer.resblocks.14.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
437
+ "model.vit.transformer.resblocks.14.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
438
+ "model.vit.transformer.resblocks.14.ffn_norm.bias": "model-00004-of-00004.safetensors",
439
+ "model.vit.transformer.resblocks.14.ffn_norm.weight": "model-00004-of-00004.safetensors",
440
+ "model.vit.transformer.resblocks.15.attention.wk.bias": "model-00004-of-00004.safetensors",
441
+ "model.vit.transformer.resblocks.15.attention.wk.weight": "model-00004-of-00004.safetensors",
442
+ "model.vit.transformer.resblocks.15.attention.wo.bias": "model-00004-of-00004.safetensors",
443
+ "model.vit.transformer.resblocks.15.attention.wo.weight": "model-00004-of-00004.safetensors",
444
+ "model.vit.transformer.resblocks.15.attention.wq.bias": "model-00004-of-00004.safetensors",
445
+ "model.vit.transformer.resblocks.15.attention.wq.weight": "model-00004-of-00004.safetensors",
446
+ "model.vit.transformer.resblocks.15.attention.wv.bias": "model-00004-of-00004.safetensors",
447
+ "model.vit.transformer.resblocks.15.attention.wv.weight": "model-00004-of-00004.safetensors",
448
+ "model.vit.transformer.resblocks.15.attention_norm.bias": "model-00004-of-00004.safetensors",
449
+ "model.vit.transformer.resblocks.15.attention_norm.weight": "model-00004-of-00004.safetensors",
450
+ "model.vit.transformer.resblocks.15.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
451
+ "model.vit.transformer.resblocks.15.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
452
+ "model.vit.transformer.resblocks.15.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
453
+ "model.vit.transformer.resblocks.15.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
454
+ "model.vit.transformer.resblocks.15.ffn_norm.bias": "model-00004-of-00004.safetensors",
455
+ "model.vit.transformer.resblocks.15.ffn_norm.weight": "model-00004-of-00004.safetensors",
456
+ "model.vit.transformer.resblocks.16.attention.wk.bias": "model-00004-of-00004.safetensors",
457
+ "model.vit.transformer.resblocks.16.attention.wk.weight": "model-00004-of-00004.safetensors",
458
+ "model.vit.transformer.resblocks.16.attention.wo.bias": "model-00004-of-00004.safetensors",
459
+ "model.vit.transformer.resblocks.16.attention.wo.weight": "model-00004-of-00004.safetensors",
460
+ "model.vit.transformer.resblocks.16.attention.wq.bias": "model-00004-of-00004.safetensors",
461
+ "model.vit.transformer.resblocks.16.attention.wq.weight": "model-00004-of-00004.safetensors",
462
+ "model.vit.transformer.resblocks.16.attention.wv.bias": "model-00004-of-00004.safetensors",
463
+ "model.vit.transformer.resblocks.16.attention.wv.weight": "model-00004-of-00004.safetensors",
464
+ "model.vit.transformer.resblocks.16.attention_norm.bias": "model-00004-of-00004.safetensors",
465
+ "model.vit.transformer.resblocks.16.attention_norm.weight": "model-00004-of-00004.safetensors",
466
+ "model.vit.transformer.resblocks.16.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
467
+ "model.vit.transformer.resblocks.16.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
468
+ "model.vit.transformer.resblocks.16.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
469
+ "model.vit.transformer.resblocks.16.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
470
+ "model.vit.transformer.resblocks.16.ffn_norm.bias": "model-00004-of-00004.safetensors",
471
+ "model.vit.transformer.resblocks.16.ffn_norm.weight": "model-00004-of-00004.safetensors",
472
+ "model.vit.transformer.resblocks.17.attention.wk.bias": "model-00004-of-00004.safetensors",
473
+ "model.vit.transformer.resblocks.17.attention.wk.weight": "model-00004-of-00004.safetensors",
474
+ "model.vit.transformer.resblocks.17.attention.wo.bias": "model-00004-of-00004.safetensors",
475
+ "model.vit.transformer.resblocks.17.attention.wo.weight": "model-00004-of-00004.safetensors",
476
+ "model.vit.transformer.resblocks.17.attention.wq.bias": "model-00004-of-00004.safetensors",
477
+ "model.vit.transformer.resblocks.17.attention.wq.weight": "model-00004-of-00004.safetensors",
478
+ "model.vit.transformer.resblocks.17.attention.wv.bias": "model-00004-of-00004.safetensors",
479
+ "model.vit.transformer.resblocks.17.attention.wv.weight": "model-00004-of-00004.safetensors",
480
+ "model.vit.transformer.resblocks.17.attention_norm.bias": "model-00004-of-00004.safetensors",
481
+ "model.vit.transformer.resblocks.17.attention_norm.weight": "model-00004-of-00004.safetensors",
482
+ "model.vit.transformer.resblocks.17.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
483
+ "model.vit.transformer.resblocks.17.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
484
+ "model.vit.transformer.resblocks.17.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
485
+ "model.vit.transformer.resblocks.17.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
486
+ "model.vit.transformer.resblocks.17.ffn_norm.bias": "model-00004-of-00004.safetensors",
487
+ "model.vit.transformer.resblocks.17.ffn_norm.weight": "model-00004-of-00004.safetensors",
488
+ "model.vit.transformer.resblocks.18.attention.wk.bias": "model-00004-of-00004.safetensors",
489
+ "model.vit.transformer.resblocks.18.attention.wk.weight": "model-00004-of-00004.safetensors",
490
+ "model.vit.transformer.resblocks.18.attention.wo.bias": "model-00004-of-00004.safetensors",
491
+ "model.vit.transformer.resblocks.18.attention.wo.weight": "model-00004-of-00004.safetensors",
492
+ "model.vit.transformer.resblocks.18.attention.wq.bias": "model-00004-of-00004.safetensors",
493
+ "model.vit.transformer.resblocks.18.attention.wq.weight": "model-00004-of-00004.safetensors",
494
+ "model.vit.transformer.resblocks.18.attention.wv.bias": "model-00004-of-00004.safetensors",
495
+ "model.vit.transformer.resblocks.18.attention.wv.weight": "model-00004-of-00004.safetensors",
496
+ "model.vit.transformer.resblocks.18.attention_norm.bias": "model-00004-of-00004.safetensors",
497
+ "model.vit.transformer.resblocks.18.attention_norm.weight": "model-00004-of-00004.safetensors",
498
+ "model.vit.transformer.resblocks.18.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
499
+ "model.vit.transformer.resblocks.18.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
500
+ "model.vit.transformer.resblocks.18.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
501
+ "model.vit.transformer.resblocks.18.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
502
+ "model.vit.transformer.resblocks.18.ffn_norm.bias": "model-00004-of-00004.safetensors",
503
+ "model.vit.transformer.resblocks.18.ffn_norm.weight": "model-00004-of-00004.safetensors",
504
+ "model.vit.transformer.resblocks.19.attention.wk.bias": "model-00004-of-00004.safetensors",
505
+ "model.vit.transformer.resblocks.19.attention.wk.weight": "model-00004-of-00004.safetensors",
506
+ "model.vit.transformer.resblocks.19.attention.wo.bias": "model-00004-of-00004.safetensors",
507
+ "model.vit.transformer.resblocks.19.attention.wo.weight": "model-00004-of-00004.safetensors",
508
+ "model.vit.transformer.resblocks.19.attention.wq.bias": "model-00004-of-00004.safetensors",
509
+ "model.vit.transformer.resblocks.19.attention.wq.weight": "model-00004-of-00004.safetensors",
510
+ "model.vit.transformer.resblocks.19.attention.wv.bias": "model-00004-of-00004.safetensors",
511
+ "model.vit.transformer.resblocks.19.attention.wv.weight": "model-00004-of-00004.safetensors",
512
+ "model.vit.transformer.resblocks.19.attention_norm.bias": "model-00004-of-00004.safetensors",
513
+ "model.vit.transformer.resblocks.19.attention_norm.weight": "model-00004-of-00004.safetensors",
514
+ "model.vit.transformer.resblocks.19.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
515
+ "model.vit.transformer.resblocks.19.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
516
+ "model.vit.transformer.resblocks.19.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
517
+ "model.vit.transformer.resblocks.19.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
518
+ "model.vit.transformer.resblocks.19.ffn_norm.bias": "model-00004-of-00004.safetensors",
519
+ "model.vit.transformer.resblocks.19.ffn_norm.weight": "model-00004-of-00004.safetensors",
520
+ "model.vit.transformer.resblocks.2.attention.wk.bias": "model-00004-of-00004.safetensors",
521
+ "model.vit.transformer.resblocks.2.attention.wk.weight": "model-00004-of-00004.safetensors",
522
+ "model.vit.transformer.resblocks.2.attention.wo.bias": "model-00004-of-00004.safetensors",
523
+ "model.vit.transformer.resblocks.2.attention.wo.weight": "model-00004-of-00004.safetensors",
524
+ "model.vit.transformer.resblocks.2.attention.wq.bias": "model-00004-of-00004.safetensors",
525
+ "model.vit.transformer.resblocks.2.attention.wq.weight": "model-00004-of-00004.safetensors",
526
+ "model.vit.transformer.resblocks.2.attention.wv.bias": "model-00004-of-00004.safetensors",
527
+ "model.vit.transformer.resblocks.2.attention.wv.weight": "model-00004-of-00004.safetensors",
528
+ "model.vit.transformer.resblocks.2.attention_norm.bias": "model-00004-of-00004.safetensors",
529
+ "model.vit.transformer.resblocks.2.attention_norm.weight": "model-00004-of-00004.safetensors",
530
+ "model.vit.transformer.resblocks.2.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
531
+ "model.vit.transformer.resblocks.2.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
532
+ "model.vit.transformer.resblocks.2.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
533
+ "model.vit.transformer.resblocks.2.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
534
+ "model.vit.transformer.resblocks.2.ffn_norm.bias": "model-00004-of-00004.safetensors",
535
+ "model.vit.transformer.resblocks.2.ffn_norm.weight": "model-00004-of-00004.safetensors",
536
+ "model.vit.transformer.resblocks.20.attention.wk.bias": "model-00004-of-00004.safetensors",
537
+ "model.vit.transformer.resblocks.20.attention.wk.weight": "model-00004-of-00004.safetensors",
538
+ "model.vit.transformer.resblocks.20.attention.wo.bias": "model-00004-of-00004.safetensors",
539
+ "model.vit.transformer.resblocks.20.attention.wo.weight": "model-00004-of-00004.safetensors",
540
+ "model.vit.transformer.resblocks.20.attention.wq.bias": "model-00004-of-00004.safetensors",
541
+ "model.vit.transformer.resblocks.20.attention.wq.weight": "model-00004-of-00004.safetensors",
542
+ "model.vit.transformer.resblocks.20.attention.wv.bias": "model-00004-of-00004.safetensors",
543
+ "model.vit.transformer.resblocks.20.attention.wv.weight": "model-00004-of-00004.safetensors",
544
+ "model.vit.transformer.resblocks.20.attention_norm.bias": "model-00004-of-00004.safetensors",
545
+ "model.vit.transformer.resblocks.20.attention_norm.weight": "model-00004-of-00004.safetensors",
546
+ "model.vit.transformer.resblocks.20.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
547
+ "model.vit.transformer.resblocks.20.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
548
+ "model.vit.transformer.resblocks.20.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
549
+ "model.vit.transformer.resblocks.20.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
550
+ "model.vit.transformer.resblocks.20.ffn_norm.bias": "model-00004-of-00004.safetensors",
551
+ "model.vit.transformer.resblocks.20.ffn_norm.weight": "model-00004-of-00004.safetensors",
552
+ "model.vit.transformer.resblocks.21.attention.wk.bias": "model-00004-of-00004.safetensors",
553
+ "model.vit.transformer.resblocks.21.attention.wk.weight": "model-00004-of-00004.safetensors",
554
+ "model.vit.transformer.resblocks.21.attention.wo.bias": "model-00004-of-00004.safetensors",
555
+ "model.vit.transformer.resblocks.21.attention.wo.weight": "model-00004-of-00004.safetensors",
556
+ "model.vit.transformer.resblocks.21.attention.wq.bias": "model-00004-of-00004.safetensors",
557
+ "model.vit.transformer.resblocks.21.attention.wq.weight": "model-00004-of-00004.safetensors",
558
+ "model.vit.transformer.resblocks.21.attention.wv.bias": "model-00004-of-00004.safetensors",
559
+ "model.vit.transformer.resblocks.21.attention.wv.weight": "model-00004-of-00004.safetensors",
560
+ "model.vit.transformer.resblocks.21.attention_norm.bias": "model-00004-of-00004.safetensors",
561
+ "model.vit.transformer.resblocks.21.attention_norm.weight": "model-00004-of-00004.safetensors",
562
+ "model.vit.transformer.resblocks.21.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
563
+ "model.vit.transformer.resblocks.21.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
564
+ "model.vit.transformer.resblocks.21.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
565
+ "model.vit.transformer.resblocks.21.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
566
+ "model.vit.transformer.resblocks.21.ffn_norm.bias": "model-00004-of-00004.safetensors",
567
+ "model.vit.transformer.resblocks.21.ffn_norm.weight": "model-00004-of-00004.safetensors",
568
+ "model.vit.transformer.resblocks.22.attention.wk.bias": "model-00004-of-00004.safetensors",
569
+ "model.vit.transformer.resblocks.22.attention.wk.weight": "model-00004-of-00004.safetensors",
570
+ "model.vit.transformer.resblocks.22.attention.wo.bias": "model-00004-of-00004.safetensors",
571
+ "model.vit.transformer.resblocks.22.attention.wo.weight": "model-00004-of-00004.safetensors",
572
+ "model.vit.transformer.resblocks.22.attention.wq.bias": "model-00004-of-00004.safetensors",
573
+ "model.vit.transformer.resblocks.22.attention.wq.weight": "model-00004-of-00004.safetensors",
574
+ "model.vit.transformer.resblocks.22.attention.wv.bias": "model-00004-of-00004.safetensors",
575
+ "model.vit.transformer.resblocks.22.attention.wv.weight": "model-00004-of-00004.safetensors",
576
+ "model.vit.transformer.resblocks.22.attention_norm.bias": "model-00004-of-00004.safetensors",
577
+ "model.vit.transformer.resblocks.22.attention_norm.weight": "model-00004-of-00004.safetensors",
578
+ "model.vit.transformer.resblocks.22.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
579
+ "model.vit.transformer.resblocks.22.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
580
+ "model.vit.transformer.resblocks.22.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
581
+ "model.vit.transformer.resblocks.22.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
582
+ "model.vit.transformer.resblocks.22.ffn_norm.bias": "model-00004-of-00004.safetensors",
583
+ "model.vit.transformer.resblocks.22.ffn_norm.weight": "model-00004-of-00004.safetensors",
584
+ "model.vit.transformer.resblocks.23.attention.wk.bias": "model-00004-of-00004.safetensors",
585
+ "model.vit.transformer.resblocks.23.attention.wk.weight": "model-00004-of-00004.safetensors",
586
+ "model.vit.transformer.resblocks.23.attention.wo.bias": "model-00004-of-00004.safetensors",
587
+ "model.vit.transformer.resblocks.23.attention.wo.weight": "model-00004-of-00004.safetensors",
588
+ "model.vit.transformer.resblocks.23.attention.wq.bias": "model-00004-of-00004.safetensors",
589
+ "model.vit.transformer.resblocks.23.attention.wq.weight": "model-00004-of-00004.safetensors",
590
+ "model.vit.transformer.resblocks.23.attention.wv.bias": "model-00004-of-00004.safetensors",
591
+ "model.vit.transformer.resblocks.23.attention.wv.weight": "model-00004-of-00004.safetensors",
592
+ "model.vit.transformer.resblocks.23.attention_norm.bias": "model-00004-of-00004.safetensors",
593
+ "model.vit.transformer.resblocks.23.attention_norm.weight": "model-00004-of-00004.safetensors",
594
+ "model.vit.transformer.resblocks.23.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
595
+ "model.vit.transformer.resblocks.23.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
596
+ "model.vit.transformer.resblocks.23.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
597
+ "model.vit.transformer.resblocks.23.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
598
+ "model.vit.transformer.resblocks.23.ffn_norm.bias": "model-00004-of-00004.safetensors",
599
+ "model.vit.transformer.resblocks.23.ffn_norm.weight": "model-00004-of-00004.safetensors",
600
+ "model.vit.transformer.resblocks.24.attention.wk.bias": "model-00004-of-00004.safetensors",
601
+ "model.vit.transformer.resblocks.24.attention.wk.weight": "model-00004-of-00004.safetensors",
602
+ "model.vit.transformer.resblocks.24.attention.wo.bias": "model-00004-of-00004.safetensors",
603
+ "model.vit.transformer.resblocks.24.attention.wo.weight": "model-00004-of-00004.safetensors",
604
+ "model.vit.transformer.resblocks.24.attention.wq.bias": "model-00004-of-00004.safetensors",
605
+ "model.vit.transformer.resblocks.24.attention.wq.weight": "model-00004-of-00004.safetensors",
606
+ "model.vit.transformer.resblocks.24.attention.wv.bias": "model-00004-of-00004.safetensors",
607
+ "model.vit.transformer.resblocks.24.attention.wv.weight": "model-00004-of-00004.safetensors",
608
+ "model.vit.transformer.resblocks.24.attention_norm.bias": "model-00004-of-00004.safetensors",
609
+ "model.vit.transformer.resblocks.24.attention_norm.weight": "model-00004-of-00004.safetensors",
610
+ "model.vit.transformer.resblocks.24.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
611
+ "model.vit.transformer.resblocks.24.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
612
+ "model.vit.transformer.resblocks.24.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
613
+ "model.vit.transformer.resblocks.24.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
614
+ "model.vit.transformer.resblocks.24.ffn_norm.bias": "model-00004-of-00004.safetensors",
615
+ "model.vit.transformer.resblocks.24.ffn_norm.weight": "model-00004-of-00004.safetensors",
616
+ "model.vit.transformer.resblocks.3.attention.wk.bias": "model-00004-of-00004.safetensors",
617
+ "model.vit.transformer.resblocks.3.attention.wk.weight": "model-00004-of-00004.safetensors",
618
+ "model.vit.transformer.resblocks.3.attention.wo.bias": "model-00004-of-00004.safetensors",
619
+ "model.vit.transformer.resblocks.3.attention.wo.weight": "model-00004-of-00004.safetensors",
620
+ "model.vit.transformer.resblocks.3.attention.wq.bias": "model-00004-of-00004.safetensors",
621
+ "model.vit.transformer.resblocks.3.attention.wq.weight": "model-00004-of-00004.safetensors",
622
+ "model.vit.transformer.resblocks.3.attention.wv.bias": "model-00004-of-00004.safetensors",
623
+ "model.vit.transformer.resblocks.3.attention.wv.weight": "model-00004-of-00004.safetensors",
624
+ "model.vit.transformer.resblocks.3.attention_norm.bias": "model-00004-of-00004.safetensors",
625
+ "model.vit.transformer.resblocks.3.attention_norm.weight": "model-00004-of-00004.safetensors",
626
+ "model.vit.transformer.resblocks.3.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
627
+ "model.vit.transformer.resblocks.3.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
628
+ "model.vit.transformer.resblocks.3.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
629
+ "model.vit.transformer.resblocks.3.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
630
+ "model.vit.transformer.resblocks.3.ffn_norm.bias": "model-00004-of-00004.safetensors",
631
+ "model.vit.transformer.resblocks.3.ffn_norm.weight": "model-00004-of-00004.safetensors",
632
+ "model.vit.transformer.resblocks.4.attention.wk.bias": "model-00004-of-00004.safetensors",
633
+ "model.vit.transformer.resblocks.4.attention.wk.weight": "model-00004-of-00004.safetensors",
634
+ "model.vit.transformer.resblocks.4.attention.wo.bias": "model-00004-of-00004.safetensors",
635
+ "model.vit.transformer.resblocks.4.attention.wo.weight": "model-00004-of-00004.safetensors",
636
+ "model.vit.transformer.resblocks.4.attention.wq.bias": "model-00004-of-00004.safetensors",
637
+ "model.vit.transformer.resblocks.4.attention.wq.weight": "model-00004-of-00004.safetensors",
638
+ "model.vit.transformer.resblocks.4.attention.wv.bias": "model-00004-of-00004.safetensors",
639
+ "model.vit.transformer.resblocks.4.attention.wv.weight": "model-00004-of-00004.safetensors",
640
+ "model.vit.transformer.resblocks.4.attention_norm.bias": "model-00004-of-00004.safetensors",
641
+ "model.vit.transformer.resblocks.4.attention_norm.weight": "model-00004-of-00004.safetensors",
642
+ "model.vit.transformer.resblocks.4.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
643
+ "model.vit.transformer.resblocks.4.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
644
+ "model.vit.transformer.resblocks.4.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
645
+ "model.vit.transformer.resblocks.4.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
646
+ "model.vit.transformer.resblocks.4.ffn_norm.bias": "model-00004-of-00004.safetensors",
647
+ "model.vit.transformer.resblocks.4.ffn_norm.weight": "model-00004-of-00004.safetensors",
648
+ "model.vit.transformer.resblocks.5.attention.wk.bias": "model-00004-of-00004.safetensors",
649
+ "model.vit.transformer.resblocks.5.attention.wk.weight": "model-00004-of-00004.safetensors",
650
+ "model.vit.transformer.resblocks.5.attention.wo.bias": "model-00004-of-00004.safetensors",
651
+ "model.vit.transformer.resblocks.5.attention.wo.weight": "model-00004-of-00004.safetensors",
652
+ "model.vit.transformer.resblocks.5.attention.wq.bias": "model-00004-of-00004.safetensors",
653
+ "model.vit.transformer.resblocks.5.attention.wq.weight": "model-00004-of-00004.safetensors",
654
+ "model.vit.transformer.resblocks.5.attention.wv.bias": "model-00004-of-00004.safetensors",
655
+ "model.vit.transformer.resblocks.5.attention.wv.weight": "model-00004-of-00004.safetensors",
656
+ "model.vit.transformer.resblocks.5.attention_norm.bias": "model-00004-of-00004.safetensors",
657
+ "model.vit.transformer.resblocks.5.attention_norm.weight": "model-00004-of-00004.safetensors",
658
+ "model.vit.transformer.resblocks.5.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
659
+ "model.vit.transformer.resblocks.5.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
660
+ "model.vit.transformer.resblocks.5.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
661
+ "model.vit.transformer.resblocks.5.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
662
+ "model.vit.transformer.resblocks.5.ffn_norm.bias": "model-00004-of-00004.safetensors",
663
+ "model.vit.transformer.resblocks.5.ffn_norm.weight": "model-00004-of-00004.safetensors",
664
+ "model.vit.transformer.resblocks.6.attention.wk.bias": "model-00004-of-00004.safetensors",
665
+ "model.vit.transformer.resblocks.6.attention.wk.weight": "model-00004-of-00004.safetensors",
666
+ "model.vit.transformer.resblocks.6.attention.wo.bias": "model-00004-of-00004.safetensors",
667
+ "model.vit.transformer.resblocks.6.attention.wo.weight": "model-00004-of-00004.safetensors",
668
+ "model.vit.transformer.resblocks.6.attention.wq.bias": "model-00004-of-00004.safetensors",
669
+ "model.vit.transformer.resblocks.6.attention.wq.weight": "model-00004-of-00004.safetensors",
670
+ "model.vit.transformer.resblocks.6.attention.wv.bias": "model-00004-of-00004.safetensors",
671
+ "model.vit.transformer.resblocks.6.attention.wv.weight": "model-00004-of-00004.safetensors",
672
+ "model.vit.transformer.resblocks.6.attention_norm.bias": "model-00004-of-00004.safetensors",
673
+ "model.vit.transformer.resblocks.6.attention_norm.weight": "model-00004-of-00004.safetensors",
674
+ "model.vit.transformer.resblocks.6.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
675
+ "model.vit.transformer.resblocks.6.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
676
+ "model.vit.transformer.resblocks.6.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
677
+ "model.vit.transformer.resblocks.6.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
678
+ "model.vit.transformer.resblocks.6.ffn_norm.bias": "model-00004-of-00004.safetensors",
679
+ "model.vit.transformer.resblocks.6.ffn_norm.weight": "model-00004-of-00004.safetensors",
680
+ "model.vit.transformer.resblocks.7.attention.wk.bias": "model-00004-of-00004.safetensors",
681
+ "model.vit.transformer.resblocks.7.attention.wk.weight": "model-00004-of-00004.safetensors",
682
+ "model.vit.transformer.resblocks.7.attention.wo.bias": "model-00004-of-00004.safetensors",
683
+ "model.vit.transformer.resblocks.7.attention.wo.weight": "model-00004-of-00004.safetensors",
684
+ "model.vit.transformer.resblocks.7.attention.wq.bias": "model-00004-of-00004.safetensors",
685
+ "model.vit.transformer.resblocks.7.attention.wq.weight": "model-00004-of-00004.safetensors",
686
+ "model.vit.transformer.resblocks.7.attention.wv.bias": "model-00004-of-00004.safetensors",
687
+ "model.vit.transformer.resblocks.7.attention.wv.weight": "model-00004-of-00004.safetensors",
688
+ "model.vit.transformer.resblocks.7.attention_norm.bias": "model-00004-of-00004.safetensors",
689
+ "model.vit.transformer.resblocks.7.attention_norm.weight": "model-00004-of-00004.safetensors",
690
+ "model.vit.transformer.resblocks.7.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
691
+ "model.vit.transformer.resblocks.7.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
692
+ "model.vit.transformer.resblocks.7.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
693
+ "model.vit.transformer.resblocks.7.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
694
+ "model.vit.transformer.resblocks.7.ffn_norm.bias": "model-00004-of-00004.safetensors",
695
+ "model.vit.transformer.resblocks.7.ffn_norm.weight": "model-00004-of-00004.safetensors",
696
+ "model.vit.transformer.resblocks.8.attention.wk.bias": "model-00004-of-00004.safetensors",
697
+ "model.vit.transformer.resblocks.8.attention.wk.weight": "model-00004-of-00004.safetensors",
698
+ "model.vit.transformer.resblocks.8.attention.wo.bias": "model-00004-of-00004.safetensors",
699
+ "model.vit.transformer.resblocks.8.attention.wo.weight": "model-00004-of-00004.safetensors",
700
+ "model.vit.transformer.resblocks.8.attention.wq.bias": "model-00004-of-00004.safetensors",
701
+ "model.vit.transformer.resblocks.8.attention.wq.weight": "model-00004-of-00004.safetensors",
702
+ "model.vit.transformer.resblocks.8.attention.wv.bias": "model-00004-of-00004.safetensors",
703
+ "model.vit.transformer.resblocks.8.attention.wv.weight": "model-00004-of-00004.safetensors",
704
+ "model.vit.transformer.resblocks.8.attention_norm.bias": "model-00004-of-00004.safetensors",
705
+ "model.vit.transformer.resblocks.8.attention_norm.weight": "model-00004-of-00004.safetensors",
706
+ "model.vit.transformer.resblocks.8.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
707
+ "model.vit.transformer.resblocks.8.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
708
+ "model.vit.transformer.resblocks.8.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
709
+ "model.vit.transformer.resblocks.8.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
710
+ "model.vit.transformer.resblocks.8.ffn_norm.bias": "model-00004-of-00004.safetensors",
711
+ "model.vit.transformer.resblocks.8.ffn_norm.weight": "model-00004-of-00004.safetensors",
712
+ "model.vit.transformer.resblocks.9.attention.wk.bias": "model-00004-of-00004.safetensors",
713
+ "model.vit.transformer.resblocks.9.attention.wk.weight": "model-00004-of-00004.safetensors",
714
+ "model.vit.transformer.resblocks.9.attention.wo.bias": "model-00004-of-00004.safetensors",
715
+ "model.vit.transformer.resblocks.9.attention.wo.weight": "model-00004-of-00004.safetensors",
716
+ "model.vit.transformer.resblocks.9.attention.wq.bias": "model-00004-of-00004.safetensors",
717
+ "model.vit.transformer.resblocks.9.attention.wq.weight": "model-00004-of-00004.safetensors",
718
+ "model.vit.transformer.resblocks.9.attention.wv.bias": "model-00004-of-00004.safetensors",
719
+ "model.vit.transformer.resblocks.9.attention.wv.weight": "model-00004-of-00004.safetensors",
720
+ "model.vit.transformer.resblocks.9.attention_norm.bias": "model-00004-of-00004.safetensors",
721
+ "model.vit.transformer.resblocks.9.attention_norm.weight": "model-00004-of-00004.safetensors",
722
+ "model.vit.transformer.resblocks.9.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
723
+ "model.vit.transformer.resblocks.9.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
724
+ "model.vit.transformer.resblocks.9.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
725
+ "model.vit.transformer.resblocks.9.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
726
+ "model.vit.transformer.resblocks.9.ffn_norm.bias": "model-00004-of-00004.safetensors",
727
+ "model.vit.transformer.resblocks.9.ffn_norm.weight": "model-00004-of-00004.safetensors"
728
+ }
729
+ }
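The weight_map above pins every tensor name to one of the four safetensors shards. As a rough illustration of how such an index is typically consumed (a minimal sketch, assuming the conventional model.safetensors.index.json filename and the safetensors package, neither of which is part of this diff), a single tensor can be loaded by resolving its shard first:

import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

# Resolve the shard that holds a given tensor, then read only that tensor.
name = "model.transformer.ln_f.weight"
shard = index["weight_map"][name]  # e.g. "model-00004-of-00004.safetensors"
with safe_open(shard, framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)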
modeling_molmo2.py ADDED
@@ -0,0 +1,1764 @@
1
+ import math
2
+ from copy import deepcopy
3
+ from dataclasses import dataclass
4
+ from typing import Optional, Union, Callable
5
+
6
+ import torch
7
+ from torch import nn
8
+ from torch.nn import functional as F
9
+
10
+ from transformers.models.auto import AutoModelForImageTextToText
11
+ from transformers.activations import ACT2FN
12
+ from transformers.configuration_utils import PretrainedConfig
13
+ from transformers.cache_utils import Cache, DynamicCache
14
+ from transformers.generation import GenerationMixin
15
+ from transformers.masking_utils import create_causal_mask, create_masks_for_generate
16
+ from transformers.modeling_flash_attention_utils import (
17
+ _flash_attention_forward,
18
+ FlashAttentionKwargs,
19
+ flash_attn_supports_top_left_mask,
20
+ )
21
+ from transformers.modeling_layers import GradientCheckpointingLayer
22
+ from transformers.modeling_outputs import (
23
+ BaseModelOutputWithPast,
24
+ )
25
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
26
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
27
+ from transformers.processing_utils import Unpack
28
+ from transformers.utils import (
29
+ ModelOutput,
30
+ TransformersKwargs,
31
+ can_return_tuple,
32
+ logging,
33
+ )
34
+
35
+ from .configuration_molmo2 import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2TextConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
+ @dataclass
42
+ class Molmo2CausalLMOutputWithPast(ModelOutput):
43
+ """
44
+ Base class for Molmo2 causal language model (or autoregressive) outputs.
45
+
46
+ Args:
47
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
48
+ Language modeling loss (for next-token prediction).
49
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
50
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
51
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
52
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
53
+
54
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
55
+ `past_key_values` input) to speed up sequential decoding.
56
+ image_hidden_states (`torch.FloatTensor`, *optional*):
57
+ A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
58
+ image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
59
+ """
60
+
61
+ loss: Optional[torch.FloatTensor] = None
62
+ logits: Optional[torch.FloatTensor] = None
63
+ past_key_values: Optional[Cache] = None
64
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
65
+ attentions: Optional[tuple[torch.FloatTensor]] = None
66
+ image_hidden_states: Optional[torch.FloatTensor] = None
67
+
68
+
69
+ @dataclass
70
+ class Molmo2ModelOutputWithPast(BaseModelOutputWithPast):
71
+ """
72
+ Base class for Molmo2 outputs, with hidden states and attentions.
73
+
74
+ Args:
75
+ image_hidden_states (`torch.FloatTensor`, *optional*):
76
+ A `torch.FloatTensor` of size `(batch_num_patches, hidden_size)`.
77
+ image_hidden_states of the model produced by the vision backbone
78
+ """
79
+ last_hidden_state: Optional[torch.FloatTensor] = None
80
+ past_key_values: Optional[Cache] = None
81
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
82
+ attentions: Optional[tuple[torch.FloatTensor]] = None
83
+ image_hidden_states: Optional[torch.FloatTensor] = None
84
+
85
+
86
+ class ViTMLP(nn.Module):
87
+ def __init__(self, dim: int, hidden_dim: int, hidden_act: str, device: Union[str, torch.device] = None):
88
+ super().__init__()
89
+ self.w1 = nn.Linear(dim, hidden_dim, bias=True, device=device)
90
+ self.act = ACT2FN[hidden_act]
91
+ self.w2 = nn.Linear(hidden_dim, dim, bias=True, device=device)
92
+
93
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
94
+ return self.w2(self.act(self.w1(x)))
95
+
96
+
97
+ class ViTMultiHeadDotProductAttention(nn.Module):
98
+ def __init__(
99
+ self,
100
+ hidden_size: int,
101
+ num_heads: int,
102
+ num_key_value_heads: int,
103
+ head_dim: int,
104
+ use_bias: bool = True,
105
+ input_dim: Optional[int] = None,
106
+ float32_attention: bool = True,
107
+ attention_dropout: float = 0.0,
108
+ residual_dropout: float = 0.0,
109
+ device: Union[str, torch.device] = None,
110
+ attn_implementation: str = "eager",
111
+ ):
112
+ super().__init__()
113
+
114
+ self.hidden_size = hidden_size
115
+ self.num_heads = num_heads
116
+ self.head_dim = head_dim
117
+ self.num_key_value_heads = num_key_value_heads
118
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
119
+ self.attn_implementation = attn_implementation
120
+ self.is_causal = False
121
+
122
+ input_dim = input_dim or hidden_size
123
+
124
+ self.wq = nn.Linear(
125
+ input_dim,
126
+ self.num_heads * self.head_dim,
127
+ bias=use_bias,
128
+ device=device,
129
+ )
130
+ self.wk = nn.Linear(
131
+ input_dim,
132
+ self.num_key_value_heads * self.head_dim,
133
+ bias=use_bias,
134
+ device=device,
135
+ )
136
+ self.wv = nn.Linear(
137
+ input_dim,
138
+ self.num_key_value_heads * self.head_dim,
139
+ bias=use_bias,
140
+ device=device,
141
+ )
142
+ self.wo = nn.Linear(
143
+ self.num_heads * self.head_dim,
144
+ self.hidden_size,
145
+ )
146
+ self.float32_attention = float32_attention
147
+ self.attention_dropout = attention_dropout
148
+ self.residual_dropout = nn.Dropout(residual_dropout)
149
+
150
+ def _split_heads(self, hidden_states, num_heads) -> torch.Tensor:
151
+ return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim))
152
+
153
+ def _merge_heads(self, hidden_states) -> torch.Tensor:
154
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,))
155
+
156
+ def forward(
157
+ self,
158
+ inputs_q: torch.Tensor,
159
+ inputs_kv: Optional[torch.Tensor] = None,
160
+ attn_mask: Optional[torch.Tensor] = None,
161
+ ) -> torch.Tensor:
162
+
163
+ if inputs_kv is not None:
164
+ inputs_k = inputs_kv
165
+ inputs_v = inputs_kv
166
+ else:
167
+ inputs_k = inputs_q
168
+ inputs_v = inputs_q
169
+
170
+ xq, xk, xv = self.wq(inputs_q), self.wk(inputs_k), self.wv(inputs_v)
171
+
172
+ xq = self._split_heads(xq, self.num_heads)
173
+ xk = self._split_heads(xk, self.num_key_value_heads)
174
+ xv = self._split_heads(xv, self.num_key_value_heads)
175
+
176
+ if self.num_heads != self.num_key_value_heads:
177
+ xk = xk.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)
178
+ xv = xv.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)
179
+
180
+ og_dtype = xq.dtype
181
+
182
+ if self.float32_attention:
183
+ xq = xq.to(torch.float)
184
+ xk = xk.to(torch.float)
185
+
186
+ dropout_p = 0.0 if not self.training else self.attention_dropout
187
+
188
+ if self.attn_implementation == "eager":
189
+ attn_weights = torch.einsum("...qhd,...khd->...hqk", xq / math.sqrt(xq.size(-1)), xk)
190
+ attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(xq.dtype)
191
+ attn_weights = F.dropout(
192
+ attn_weights,
193
+ p=dropout_p,
194
+ training=self.training
195
+ )
196
+ attn_output = torch.einsum("...hqk,...khd->...qhd", attn_weights.to(xv.dtype), xv)
197
+
198
+ elif self.attn_implementation == "sdpa":
199
+ if not torch.is_autocast_enabled():
200
+ xv = xv.to(torch.float)
201
+
202
+ attn_output = F.scaled_dot_product_attention(
203
+ xq.transpose(1, 2).contiguous(),
204
+ xk.transpose(1, 2).contiguous(),
205
+ xv.transpose(1, 2).contiguous(),
206
+ attn_mask=attn_mask,
207
+ is_causal=False,
208
+ dropout_p=dropout_p,
209
+ ).transpose(1, 2)
210
+
211
+ elif self.attn_implementation == "flash_attention_2":
212
+ target_dtype = None
+ if xq.dtype == torch.float32:
213
+ if torch.is_autocast_enabled():
214
+ target_dtype = torch.get_autocast_gpu_dtype()
215
+ else:
216
+ target_dtype = self.wq.weight.dtype
217
+ attn_output = _flash_attention_forward(
218
+ xq,
219
+ xk,
220
+ xv,
221
+ attention_mask=attn_mask,
222
+ query_length=inputs_q.shape[1],
223
+ is_causal=False,
224
+ dropout=dropout_p,
225
+ softmax_scale=xq.shape[-1] ** -0.5,
226
+ use_top_left_mask=flash_attn_supports_top_left_mask(),
227
+ target_dtype=target_dtype,
228
+ implementation=self.attn_implementation,
229
+ )
230
+ else:
231
+ raise ValueError(f"Attention implementation {self.attn_implementation} not supported")
232
+
233
+ attn_output = attn_output.to(og_dtype)
234
+ attn_output = self._merge_heads(attn_output)
235
+ attn_output = self.wo(attn_output)
236
+ attn_output = self.residual_dropout(attn_output)
237
+
238
+ return attn_output
239
+
240
+
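Because num_key_value_heads can be smaller than num_heads, the attention module above expands keys and values to the full head count with repeat_interleave before the dot product. A minimal sketch of that grouped-query expansion (illustrative only, not part of the uploaded file):

import torch

num_heads, num_kv_heads = 8, 2
groups = num_heads // num_kv_heads

# [batch, seq, kv_heads, head_dim] -> [batch, seq, heads, head_dim]
xk = torch.randn(1, 5, num_kv_heads, 16)
xk = xk.repeat_interleave(groups, dim=2, output_size=num_heads)
print(xk.shape)  # torch.Size([1, 5, 8, 16])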
241
+ class Molmo2VisionBlock(nn.Module):
242
+
243
+ def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
244
+ super().__init__()
245
+ self.attention = ViTMultiHeadDotProductAttention(
246
+ hidden_size=config.hidden_size,
247
+ num_heads=config.num_attention_heads,
248
+ num_key_value_heads=config.num_key_value_heads,
249
+ head_dim=config.head_dim,
250
+ float32_attention=config.float32_attention,
251
+ attention_dropout=config.attention_dropout,
252
+ residual_dropout=config.residual_dropout,
253
+ device=device,
254
+ attn_implementation=config._attn_implementation,
255
+ )
256
+ self.feed_forward = ViTMLP(config.hidden_size, config.intermediate_size, config.hidden_act, device=device)
257
+ self.attention_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)
258
+ self.ffn_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)
259
+
260
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
261
+ x = x + self.attention(self.attention_norm(x))
262
+ x = x + self.feed_forward(self.ffn_norm(x))
263
+ return x
264
+
265
+
266
+ class Molmo2VisionBlockCollection(nn.Module):
267
+
268
+ def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
269
+ super().__init__()
270
+ self.config = config
271
+ self.resblocks = nn.ModuleList([
272
+ Molmo2VisionBlock(config, device) for _ in range(config.num_hidden_layers)
273
+ ])
274
+
275
+ def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
276
+ hidden_states = []
277
+ for r in self.resblocks:
278
+ x = r(x)
279
+ hidden_states.append(x)
280
+ return hidden_states
281
+
282
+
283
+ class Molmo2VisionTransformer(nn.Module):
284
+
285
+ def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
286
+ super().__init__()
287
+ self.config = config
288
+
289
+ # positional embeddings
290
+ self.scale = config.hidden_size ** -0.5
291
+ self.num_prefix_tokens: int = 0 # no class embeddings
292
+ self.positional_embedding = nn.Parameter(
293
+ torch.zeros(config.image_num_pos, config.hidden_size, device=device),
294
+ )
295
+
296
+ image_patch_size = config.image_patch_size
297
+ self.patch_embedding = nn.Linear(
298
+ image_patch_size * image_patch_size * 3,
299
+ config.hidden_size,
300
+ bias=True,
301
+ device=device,
302
+ )
303
+
304
+ self.transformer = Molmo2VisionBlockCollection(config, device)
305
+
306
+ def add_pos_emb(self, x: torch.Tensor, patch_num: int) -> torch.Tensor:
307
+ pos_emb = self.positional_embedding
308
+
309
+ pos_emb = pos_emb.reshape(
310
+ (int(math.sqrt(pos_emb.shape[0])), int(math.sqrt(pos_emb.shape[0])), pos_emb.shape[1])
311
+ )
312
+
313
+ (patch_num_0, patch_num_1) = patch_num
314
+
315
+ if pos_emb.shape[0] != patch_num_0 or pos_emb.shape[1] != patch_num_1:
316
+ # Derived from https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py
317
+ # antialias: default True in jax.image.resize
318
+ pos_emb = pos_emb.unsqueeze(0).permute(0, 3, 1, 2)
319
+ pos_emb = F.interpolate(
320
+ pos_emb, size=(patch_num_0, patch_num_1), mode="bicubic", align_corners=False, antialias=True,
321
+ )
322
+ pos_emb = pos_emb.permute(0, 2, 3, 1).squeeze(0)
323
+
324
+ pos_emb = pos_emb.reshape(-1, pos_emb.shape[-1])
325
+ x = x + pos_emb[None, :, :].to(x.dtype)
326
+ return x
327
+
328
+ def forward(self, x: torch.Tensor, patch_num: Optional[tuple[int, int]] = None) -> list[torch.Tensor]:
329
+ """
330
+ : param x: (batch_size, num_patch, n_pixels)
331
+ """
332
+ if patch_num is None:
333
+ patch_num = self.config.image_num_patch
334
+
335
+ B, N, D = x.shape
336
+
337
+ x = self.patch_embedding(x)
338
+
339
+ # positional embeddings (no class embeddings in this model)
340
+ x = self.add_pos_emb(x, patch_num)
341
+
342
+ hidden_states = self.transformer(x)
343
+ return hidden_states
344
+
345
+
346
+ class ImageProjectorMLP(nn.Module):
347
+
348
+ def __init__(
349
+ self,
350
+ input_dim: int,
351
+ hidden_dim: int,
352
+ output_dim: int,
353
+ hidden_act: str,
354
+ device: Union[str, torch.device] = None,
355
+ ):
356
+ super().__init__()
357
+ self.w1 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
358
+ self.w2 = nn.Linear(hidden_dim, output_dim, bias=False, device=device)
359
+ self.w3 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
360
+ self.act = ACT2FN[hidden_act]
361
+
362
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
363
+ return self.w2(self.act(self.w1(x)) * self.w3(x))
364
+
365
+
366
+ class Molmo2VisionBackbone(nn.Module):
367
+ def __init__(self, vit_config: Molmo2VitConfig, adapter_config: Molmo2AdapterConfig):
368
+ super().__init__()
369
+ self.vit_config = vit_config
370
+ self.adapter_config = adapter_config
371
+
372
+ self.vit_layers = []
373
+ for layer in adapter_config.vit_layers:
374
+ if layer >= 0:
375
+ self.vit_layers.append(layer)
376
+ else:
377
+ self.vit_layers.append(layer + vit_config.num_hidden_layers)
378
+
379
+ last_layer_needed = max(self.vit_layers) + 1
380
+ if last_layer_needed < vit_config.num_hidden_layers:
381
+ new_vit_config = deepcopy(vit_config)
382
+ new_vit_config.num_hidden_layers = last_layer_needed
383
+ self.image_vit = Molmo2VisionTransformer(new_vit_config)
384
+ else:
385
+ self.image_vit = Molmo2VisionTransformer(vit_config)
386
+
387
+ self.num_prefix_tokens: int = self.image_vit.num_prefix_tokens
388
+
389
+ pool_dim = vit_config.hidden_size * len(adapter_config.vit_layers)
390
+ self.image_pooling_2d = ViTMultiHeadDotProductAttention(
391
+ hidden_size=adapter_config.hidden_size,
392
+ num_heads=adapter_config.num_attention_heads,
393
+ num_key_value_heads=adapter_config.num_key_value_heads,
394
+ head_dim=adapter_config.head_dim,
395
+ input_dim=pool_dim,
396
+ float32_attention=adapter_config.float32_attention,
397
+ attention_dropout=adapter_config.attention_dropout,
398
+ residual_dropout=adapter_config.residual_dropout,
399
+ attn_implementation=adapter_config._attn_implementation,
400
+ )
401
+ self.image_projector = ImageProjectorMLP(
402
+ adapter_config.hidden_size,
403
+ adapter_config.intermediate_size,
404
+ adapter_config.text_hidden_size,
405
+ adapter_config.hidden_act,
406
+ )
407
+ self.image_feature_dropout = nn.Dropout(adapter_config.image_feature_dropout)
408
+
409
+ def encode_image(self, images: torch.Tensor) -> torch.Tensor:
410
+ """
411
+ : param images: (batch_size, num_crops, num_patch, n_pixels)
412
+ """
413
+ B, T, N, D = images.shape
414
+ images = images.view(B * T, N, D)
415
+ image_features = self.image_vit(images)
416
+
417
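+ # Gather the hidden states from the selected ViT layers and concatenate them along the feature dimension.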
+ features = []
418
+ for layer in self.vit_layers:
419
+ features.append(image_features[layer])
420
+ image_features = torch.cat(features, dim=-1)
421
+
422
+ if self.num_prefix_tokens > 0:
423
+ image_features = image_features[:, 1:]
424
+ image_features = image_features.view(B, T, N, -1)
425
+ return image_features
426
+
427
+ @property
428
+ def dtype(self) -> torch.dtype:
429
+ return self.image_vit.patch_embedding.weight.dtype
430
+
431
+ @property
432
+ def device(self) -> torch.device:
433
+ return self.image_vit.patch_embedding.weight.device
434
+
435
+ def forward(
436
+ self,
437
+ images: torch.Tensor,
438
+ pooled_patches_idx: torch.Tensor,
439
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
440
+
441
+ # image_features: (batch_size, num_crops(=num_image), num_patch, nximage_emb_dim)
442
+ batch_size, num_image = images.shape[:2]
443
+ images = images.to(device=self.device, dtype=self.dtype)
444
+ image_features = self.encode_image(images)
445
+
446
+ image_features = self.image_feature_dropout(image_features)
447
+ dim = image_features.shape[-1]
448
+ valid = pooled_patches_idx >= 0
449
+ valid_token = torch.any(valid, -1)
450
+
451
+ # Use `pooled_patches_idx` to arrange the features for image pooling
452
+ batch_idx = torch.arange(pooled_patches_idx.shape[0], dtype=torch.long, device=pooled_patches_idx.device)
453
+ batch_idx = torch.tile(batch_idx.view(batch_size, 1, 1), [1, pooled_patches_idx.shape[1], pooled_patches_idx.shape[2]])
454
+
455
+ # Now [batch, num_high_res_features, pool_dim, dim]
456
+ to_pool = image_features.reshape(batch_size, -1, dim)[batch_idx, torch.clip(pooled_patches_idx, 0)]
457
+ to_pool = to_pool * valid.to(self.dtype)[:, :, :, None]
458
+ to_pool = to_pool.reshape([-1, pooled_patches_idx.shape[-1], dim])
459
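+ # Attention-pool each group of patches; the query is the (masked) mean of the patches in the group.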
+ if self.adapter_config.pooling_attention_mask:
460
+ attn_mask = valid.reshape([-1, 1, 1, valid.shape[-1]])
461
+ denom = valid.view(-1, to_pool.shape[-2]).float().sum(-1)
462
+ denom = torch.where(denom == 0, 1, denom)
463
+ query = to_pool.sum(-2, keepdim=True) / denom[:, None, None].to(to_pool.dtype)
464
+ else:
465
+ attn_mask = None
466
+ query = to_pool.mean(-2, keepdim=True)
467
+ pooled_features = self.image_pooling_2d(query, to_pool, attn_mask=attn_mask)
468
+ pooled_features = pooled_features.reshape([batch_size, -1, pooled_features.shape[-1]])
469
+
470
+ # MLP layer to map the feature.
471
+ pooled_features = self.image_projector(pooled_features)
472
+ return pooled_features.view(-1, pooled_features.shape[-1])[valid_token.flatten()]
473
+
474
+
475
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
476
+ def rotate_half(x):
477
+ """Rotates half the hidden dims of the input."""
478
+ x1 = x[..., : x.shape[-1] // 2]
479
+ x2 = x[..., x.shape[-1] // 2 :]
480
+ return torch.cat((-x2, x1), dim=-1)
481
+
482
+
483
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
484
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
485
+ """Applies Rotary Position Embedding to the query and key tensors.
486
+
487
+ Args:
488
+ q (`torch.Tensor`): The query tensor.
489
+ k (`torch.Tensor`): The key tensor.
490
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
491
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
492
+ position_ids (`torch.Tensor`, *optional*):
493
+ Deprecated and unused.
494
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
495
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
496
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
497
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
498
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
499
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
500
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
501
+ Returns:
502
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
503
+ """
504
+ cos = cos.unsqueeze(unsqueeze_dim)
505
+ sin = sin.unsqueeze(unsqueeze_dim)
506
+ q_embed = (q * cos) + (rotate_half(q) * sin)
507
+ k_embed = (k * cos) + (rotate_half(k) * sin)
508
+ return q_embed, k_embed
509
+
510
+
511
+ class Molmo2RotaryEmbedding(nn.Module):
512
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
513
+
514
+ def __init__(
515
+ self,
516
+ config: Molmo2TextConfig,
517
+ device: Union[str, torch.device] = None,
518
+ rope_type: Optional[str] = None,
519
+ ):
520
+ super().__init__()
521
+ if rope_type is not None:
522
+ self.rope_type = rope_type
523
+ elif hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
524
+ # BC: "rope_type" was originally "type"
525
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
526
+ else:
527
+ self.rope_type = "default"
528
+ self.max_seq_len_cached = config.max_position_embeddings
529
+ self.original_max_seq_len = config.max_position_embeddings
530
+
531
+ self.config = config
532
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
533
+
534
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
535
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
536
+ self.original_inv_freq = self.inv_freq
537
+
538
+ @torch.no_grad()
539
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
540
+ def forward(self, x, position_ids: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
541
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
542
+ position_ids_expanded = position_ids[:, None, :].float()
543
+
544
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
545
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
546
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
547
+ emb = torch.cat((freqs, freqs), dim=-1)
548
+ cos = emb.cos() * self.attention_scaling
549
+ sin = emb.sin() * self.attention_scaling
550
+
551
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
552
+
553
+
554
+ class Molmo2RMSNorm(nn.Module):
555
+
556
+ def __init__(
557
+ self,
558
+ size: int,
559
+ eps: float = 1e-6,
560
+ device: Union[str, torch.device] = None,
561
+ ):
562
+ super().__init__()
563
+ self.weight = nn.Parameter(torch.ones(size, device=device))
564
+ self.eps = eps
565
+
566
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
567
+ with torch.autocast(enabled=False, device_type=x.device.type):
568
+ og_dtype = x.dtype
569
+ x = x.to(torch.float32)
570
+ variance = x.pow(2).mean(-1, keepdim=True)
571
+ x = x * torch.rsqrt(variance + self.eps)
572
+ x = x.to(og_dtype)
573
+
574
+ return self.weight * x
575
+
576
+ def extra_repr(self):
577
+ return f"{tuple(self.weight.shape)}, eps={self.eps}"
578
+
579
+
580
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
581
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
582
+ """
583
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
584
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
585
+ """
586
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
587
+ if n_rep == 1:
588
+ return hidden_states
589
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
590
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
591
+
592
+
593
+ def eager_attention_forward(
594
+ module: nn.Module,
595
+ query: torch.Tensor,
596
+ key: torch.Tensor,
597
+ value: torch.Tensor,
598
+ attention_mask: Optional[torch.Tensor],
599
+ scaling: float,
600
+ dropout: float = 0.0,
601
+ **kwargs,
602
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
603
+ key_states = repeat_kv(key, module.num_key_value_groups)
604
+ value_states = repeat_kv(value, module.num_key_value_groups)
605
+
606
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
607
+ if attention_mask is not None:
608
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
609
+ attn_weights = attn_weights + causal_mask
610
+
611
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
612
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
613
+ attn_output = torch.matmul(attn_weights, value_states)
614
+ attn_output = attn_output.transpose(1, 2).contiguous()
615
+
616
+ return attn_output, attn_weights
617
+
618
+
619
+ class Molmo2Attention(nn.Module):
620
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
621
+
622
+ def __init__(self, config: Molmo2TextConfig, layer_idx: int) -> None:
623
+ super().__init__()
624
+ self.config = config
625
+ self.layer_idx = layer_idx
626
+ self.num_heads = config.num_attention_heads
627
+ self.num_key_value_heads = config.num_key_value_heads
628
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
629
+ self.head_dim = config.head_dim
630
+ self.scaling = self.head_dim**-0.5
631
+ self.is_causal = True
632
+
633
+ self.fused_dims = (
634
+ config.num_attention_heads * config.head_dim,
635
+ config.head_dim * config.num_key_value_heads,
636
+ config.head_dim * config.num_key_value_heads,
637
+ )
638
+ self.att_proj = nn.Linear(
639
+ config.hidden_size,
640
+ sum(self.fused_dims),
641
+ bias=config.qkv_bias,
642
+ )
643
+
644
+ # Layer norms.
645
+ self.k_norm: Optional[Molmo2RMSNorm] = None
646
+ self.q_norm: Optional[Molmo2RMSNorm] = None
647
+ self.qk_norm_type: Optional[str] = None
648
+ if config.use_qk_norm:
649
+ k_norm_size = (
650
+ config.head_dim
651
+ if config.qk_norm_type == "qwen3" else
652
+ config.num_key_value_heads * config.head_dim
653
+ )
654
+ self.k_norm = Molmo2RMSNorm(k_norm_size, eps=config.layer_norm_eps)
655
+ q_norm_size = (
656
+ config.head_dim
657
+ if config.qk_norm_type == "qwen3" else
658
+ config.num_attention_heads * config.head_dim
659
+ )
660
+ self.q_norm = Molmo2RMSNorm(q_norm_size, eps=config.layer_norm_eps)
661
+ self.qk_norm_type = config.qk_norm_type
662
+
663
+ self.attention_dropout = config.attention_dropout
664
+
665
+ self.attn_out = nn.Linear(
666
+ config.head_dim * config.num_attention_heads,
667
+ config.hidden_size,
668
+ bias=False,
669
+ )
670
+
671
+ def forward(
672
+ self,
673
+ hidden_states: torch.Tensor,
674
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
675
+ attention_mask: Optional[torch.Tensor],
676
+ past_key_values: Optional[Cache] = None,
677
+ cache_position: Optional[torch.LongTensor] = None,
678
+ **kwargs: Unpack[FlashAttentionKwargs],
679
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
680
+ input_shape = hidden_states.shape[:-1]
681
+ hidden_shape = (*input_shape, -1, self.head_dim)
682
+
683
+ qkv = self.att_proj(hidden_states)
684
+ query_states, key_states, value_states = qkv.split(self.fused_dims, dim=-1)
685
+ value_states = value_states.view(hidden_shape)
686
+
687
+ # Optionally apply layer norm to keys and queries.
688
+ if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type != "qwen3":
689
+ query_states = self.q_norm(query_states)
690
+ key_states = self.k_norm(key_states)
691
+
692
+ query_states = query_states.view(hidden_shape)
693
+ key_states = key_states.view(hidden_shape)
694
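+ # qwen3-style QK-norm is applied per head, so it runs after the reshape to (..., num_heads, head_dim).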
+ if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type == "qwen3":
695
+ query_states = self.q_norm(query_states)
696
+ key_states = self.k_norm(key_states)
697
+ query_states = query_states.transpose(1, 2)
698
+ key_states = key_states.transpose(1, 2)
699
+ value_states = value_states.transpose(1, 2)
700
+
701
+ cos, sin = position_embeddings
702
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
703
+
704
+ if past_key_values is not None:
705
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
706
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
707
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
708
+
709
+ attention_interface: Callable = eager_attention_forward
710
+ if self.config._attn_implementation != "eager":
711
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
712
+
713
+ attn_output, attn_weights = attention_interface(
714
+ self,
715
+ query_states,
716
+ key_states,
717
+ value_states,
718
+ attention_mask,
719
+ dropout=0.0 if not self.training else self.attention_dropout,
720
+ scaling=self.scaling,
721
+ **kwargs,
722
+ )
723
+
724
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
725
+ attn_output = self.attn_out(attn_output)
726
+ return attn_output, attn_weights
727
+
728
+
729
+ class LanguageModelMLP(nn.Module):
730
+
731
+ def __init__(
732
+ self,
733
+ input_dim: int,
734
+ intermediate_size: int,
735
+ hidden_act: str,
736
+ device: Union[str, torch.device] = None,
737
+ ):
738
+ super().__init__()
739
+ self.ff_proj = nn.Linear(input_dim, intermediate_size * 2, bias=False, device=device)
740
+ self.ff_out = nn.Linear(intermediate_size, input_dim, bias=False, device=device)
741
+ self.act = ACT2FN[hidden_act]
742
+
743
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
744
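+ # SwiGLU-style MLP: a single fused projection is split into value and gate halves.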
+ x = self.ff_proj(x)
745
+ x, gate = x.chunk(2, dim=-1)
746
+ x = self.act(gate) * x
747
+ x = self.ff_out(x)
748
+ return x
749
+
750
+
751
+ class Molmo2DecoderLayer(GradientCheckpointingLayer):
752
+
753
+ def __init__(
754
+ self,
755
+ config: Molmo2TextConfig,
756
+ layer_idx: Optional[int] = None,
757
+ device: Union[str, torch.device] = None
758
+ ):
759
+ super().__init__()
760
+ self.config = config
761
+
762
+ self.self_attn = Molmo2Attention(config, layer_idx)
763
+ self.attn_norm = Molmo2RMSNorm(
764
+ config.hidden_size, eps=config.layer_norm_eps, device=device)
765
+ self.dropout = nn.Dropout(config.residual_dropout)
766
+ self.mlp = LanguageModelMLP(
767
+ config.hidden_size, config.intermediate_size, config.hidden_act, device=device)
768
+ self.ff_norm = Molmo2RMSNorm(
769
+ config.hidden_size, eps=config.layer_norm_eps, device=device)
770
+
771
+ def forward(
772
+ self,
773
+ hidden_states: torch.Tensor,
774
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
775
+ attention_mask: Optional[torch.Tensor] = None,
776
+ position_ids: Optional[torch.LongTensor] = None,
777
+ past_key_values: Optional[Cache] = None,
778
+ output_attentions: Optional[bool] = False,
779
+ use_cache: Optional[bool] = False,
780
+ cache_position: Optional[torch.LongTensor] = None,
781
+ **kwargs: Unpack[TransformersKwargs],
782
+ ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
783
+
784
+ residual = hidden_states
785
+ hidden_states = self.attn_norm(hidden_states)
786
+
787
+ # Self Attention
788
+ hidden_states, self_attn_weights = self.self_attn(
789
+ hidden_states=hidden_states,
790
+ position_embeddings=position_embeddings,
791
+ attention_mask=attention_mask,
792
+ position_ids=position_ids,
793
+ past_key_values=past_key_values,
794
+ output_attentions=output_attentions,
795
+ use_cache=use_cache,
796
+ cache_position=cache_position,
797
+ **kwargs,
798
+ )
799
+
800
+ hidden_states = residual + self.dropout(hidden_states)
801
+
802
+ # Fully Connected
803
+ residual = hidden_states
804
+ hidden_states = self.ff_norm(hidden_states)
805
+ hidden_states = self.mlp(hidden_states)
806
+
807
+ hidden_states = residual + self.dropout(hidden_states)
808
+
809
+ outputs = (hidden_states,)
810
+
811
+ if output_attentions:
812
+ outputs += (self_attn_weights,)
813
+
814
+ return outputs
815
+
816
+
817
+ class Molmo2PostNormDecoderLayer(Molmo2DecoderLayer):
818
+ def forward(
819
+ self,
820
+ hidden_states: torch.Tensor,
821
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
822
+ attention_mask: Optional[torch.Tensor] = None,
823
+ position_ids: Optional[torch.LongTensor] = None,
824
+ past_key_values: Optional[Cache] = None,
825
+ output_attentions: Optional[bool] = False,
826
+ use_cache: Optional[bool] = False,
827
+ cache_position: Optional[torch.LongTensor] = None,
828
+ **kwargs,
829
+ ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
830
+
831
+ residual = hidden_states
832
+
833
+ # Self Attention
834
+ hidden_states, self_attn_weights = self.self_attn(
835
+ hidden_states=hidden_states,
836
+ position_embeddings=position_embeddings,
837
+ attention_mask=attention_mask,
838
+ position_ids=position_ids,
839
+ past_key_values=past_key_values,
840
+ output_attentions=output_attentions,
841
+ use_cache=use_cache,
842
+ cache_position=cache_position,
843
+ )
844
+ hidden_states = self.attn_norm(hidden_states)
845
+
846
+ hidden_states = residual + self.dropout(hidden_states)
847
+
848
+ # Fully Connected
849
+ residual = hidden_states
850
+ hidden_states = self.mlp(hidden_states)
851
+ hidden_states = self.ff_norm(hidden_states)
852
+
853
+ hidden_states = residual + self.dropout(hidden_states)
854
+
855
+ outputs = (hidden_states,)
856
+
857
+ if output_attentions:
858
+ outputs += (self_attn_weights,)
859
+
860
+ return outputs
861
+
862
+
863
+ class Molmo2Embedding(nn.Module):
864
+ def __init__(
865
+ self,
866
+ num_embeddings: int,
867
+ num_new_embeddings: int,
868
+ features: int,
869
+ device: Union[str, torch.device] = None,
870
+ ):
871
+ super().__init__()
872
+ self.embedding = nn.Parameter(
873
+ torch.zeros(num_embeddings, features, device=device),
874
+ )
875
+ self.new_embedding = nn.Parameter(
876
+ torch.zeros(num_new_embeddings, features, device=device),
877
+ )
878
+
879
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
880
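+ # Look up ids in the base and additional vocabularies as one concatenated embedding table.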
+ return F.embedding(x, torch.cat([self.embedding, self.new_embedding], dim=0))
881
+
882
+
883
+ class Molmo2PreTrainedModel(PreTrainedModel):
884
+ config: Molmo2Config
885
+ base_model_prefix = "model"
886
+ supports_gradient_checkpointing = True
887
+ _no_split_modules = [
888
+ "Molmo2DecoderLayer",
889
+ "Molmo2PostNormDecoderLayer",
890
+ "Molmo2VisionBlock",
891
+ "ViTMultiHeadDotProductAttention",
892
+ ]
893
+ _skip_keys_device_placement = "past_key_values"
894
+ _supports_flash_attn = True
895
+ _supports_sdpa = True
896
+
897
+ _can_compile_fullgraph = True
898
+ _supports_attention_backend = True
899
+ _can_record_outputs = {
900
+ "hidden_states": Molmo2DecoderLayer,
901
+ "attentions": Molmo2Attention,
902
+ }
903
+
904
+ def _init_weights(self, module):
905
+ std = self.config.initializer_range
906
+ if isinstance(module, (nn.Linear,)):
907
+ module.weight.data.normal_(mean=0.0, std=std)
908
+ if module.bias is not None:
909
+ module.bias.data.zero_()
910
+ elif isinstance(module, Molmo2Embedding):
911
+ module.embedding.data.normal_(mean=0.0, std=std)
912
+ module.new_embedding.data.normal_(mean=0.0, std=std)
913
+ elif isinstance(module, nn.Embedding):
914
+ module.weight.data.normal_(mean=0.0, std=std)
915
+ if module.padding_idx is not None:
916
+ module.weight.data[module.padding_idx].zero_()
917
+ elif isinstance(module, Molmo2RMSNorm):
918
+ module.weight.data.fill_(1.0)
919
+ elif isinstance(module, nn.LayerNorm):
920
+ module.weight.data.fill_(1.0)
921
+ if module.bias is not None:
922
+ module.bias.data.zero_()
923
+
924
+
925
+ class Molmo2TextModel(Molmo2PreTrainedModel):
926
+ config: Molmo2TextConfig
927
+ _no_split_modules = ["Molmo2DecoderLayer", "Molmo2PostNormDecoderLayer"]
928
+
929
+ def __init__(self, config: Molmo2TextConfig):
930
+ super().__init__(config)
931
+ if config.additional_vocab_size is not None:
932
+ self.wte = Molmo2Embedding(
933
+ config.vocab_size,
934
+ config.additional_vocab_size,
935
+ config.hidden_size,
936
+ )
937
+ else:
938
+ self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
939
+ self.emb_drop = nn.Dropout(config.embedding_dropout)
940
+ decoder_layer = Molmo2PostNormDecoderLayer if config.norm_after else Molmo2DecoderLayer
941
+ self.blocks = nn.ModuleList(
942
+ [decoder_layer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
943
+ )
944
+ self.ln_f = Molmo2RMSNorm(config.hidden_size, eps=config.layer_norm_eps)
945
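+ # When RoPE scaling applies to only a subset of layers, keep both a default and a scaled rotary embedding.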
+ if config.rope_scaling_layers is not None:
946
+ self.rotary_embs = nn.ModuleDict(
947
+ {
948
+ "default": Molmo2RotaryEmbedding(config, rope_type="default"),
949
+ "scaling": Molmo2RotaryEmbedding(config),
950
+ }
951
+ )
952
+ else:
953
+ self.rotary_emb = Molmo2RotaryEmbedding(config)
954
+ self.gradient_checkpointing = False
955
+
956
+ # Initialize weights and apply final processing
957
+ self.post_init()
958
+
959
+ def get_input_embeddings(self) -> torch.nn.Module:
960
+ return self.wte
961
+
962
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
963
+ self.wte = value
964
+
965
+ @can_return_tuple
966
+ def forward(
967
+ self,
968
+ input_ids: Optional[torch.LongTensor] = None,
969
+ attention_mask: Optional[torch.Tensor] = None,
970
+ position_ids: Optional[torch.LongTensor] = None,
971
+ past_key_values: Optional[Cache] = None,
972
+ inputs_embeds: Optional[torch.FloatTensor] = None,
973
+ use_cache: Optional[bool] = None,
974
+ output_attentions: Optional[bool] = None,
975
+ output_hidden_states: Optional[bool] = None,
976
+ cache_position: Optional[torch.LongTensor] = None,
977
+ **kwargs: Unpack[TransformersKwargs],
978
+ ) -> BaseModelOutputWithPast:
979
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
980
+ output_hidden_states = (
981
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
982
+ )
983
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
984
+
985
+ if (input_ids is None) ^ (inputs_embeds is not None):
986
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
987
+
988
+ if self.gradient_checkpointing and self.training and use_cache:
989
+ logger.warning_once(
990
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
991
+ )
992
+ use_cache = False
993
+
994
+ if inputs_embeds is None:
995
+ input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
996
+ inputs_embeds = self.wte(input_ids)
997
+
998
+ # torch.jit.trace() doesn't support cache objects in the output
999
+ if use_cache and past_key_values is None and not torch.jit.is_tracing():
1000
+ past_key_values = DynamicCache(config=self.config)
1001
+
1002
+ if cache_position is None:
1003
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1004
+ cache_position = torch.arange(
1005
+ past_seen_tokens,
1006
+ past_seen_tokens + inputs_embeds.shape[1],
1007
+ device=inputs_embeds.device,
1008
+ )
1009
+
1010
+ if position_ids is None:
1011
+ position_ids = cache_position.unsqueeze(0)
1012
+
1013
+ # It may already have been prepared by e.g. `generate`
1014
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
1015
+ # Prepare mask arguments
1016
+ mask_kwargs = {
1017
+ "config": self.config,
1018
+ "input_embeds": inputs_embeds,
1019
+ "attention_mask": attention_mask,
1020
+ "cache_position": cache_position,
1021
+ "past_key_values": past_key_values,
1022
+ "position_ids": position_ids,
1023
+ }
1024
+
1025
+ # Create the mask
1026
+ causal_mask_mapping = create_causal_mask(**mask_kwargs)
1027
+
1028
+ hidden_states = inputs_embeds
1029
+
1030
+ # create position embeddings to be shared across the decoder layers
1031
+ if self.config.rope_scaling_layers is not None:
1032
+ position_embeddings_mapping = {
1033
+ "default": self.rotary_embs["default"](hidden_states, position_ids),
1034
+ "scaling": self.rotary_embs["scaling"](hidden_states, position_ids),
1035
+ }
1036
+ else:
1037
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
1038
+
1039
+ # decoder layers
1040
+ all_hidden_states = () if output_hidden_states else None
1041
+ all_self_attns = () if output_attentions else None
1042
+
1043
+ for layer_idx, decoder_block in enumerate(self.blocks[: self.config.num_hidden_layers]):
1044
+ if output_hidden_states:
1045
+ all_hidden_states += (hidden_states,)
1046
+
1047
+ if self.config.rope_scaling_layers is not None:
1048
+ position_embeddings_i = (
1049
+ position_embeddings_mapping["scaling"]
1050
+ if layer_idx in self.config.rope_scaling_layers
1051
+ else position_embeddings_mapping["default"]
1052
+ )
1053
+ else:
1054
+ position_embeddings_i = position_embeddings
1055
+
1056
+ layer_outputs = decoder_block(
1057
+ hidden_states,
1058
+ attention_mask=causal_mask_mapping,
1059
+ position_ids=position_ids,
1060
+ past_key_values=past_key_values,
1061
+ output_attentions=output_attentions,
1062
+ use_cache=use_cache,
1063
+ cache_position=cache_position,
1064
+ position_embeddings=position_embeddings_i,
1065
+ **kwargs,
1066
+ )
1067
+
1068
+ hidden_states = layer_outputs[0]
1069
+
1070
+ if output_attentions:
1071
+ all_self_attns += (layer_outputs[1],)
1072
+
1073
+ hidden_states = self.ln_f(hidden_states)
1074
+
1075
+ # add hidden states from the last decoder layer
1076
+ if output_hidden_states:
1077
+ all_hidden_states += (hidden_states,)
1078
+
1079
+ return BaseModelOutputWithPast(
1080
+ last_hidden_state=hidden_states,
1081
+ past_key_values=past_key_values,
1082
+ hidden_states=all_hidden_states,
1083
+ attentions=all_self_attns,
1084
+ )
1085
+
1086
+ # Adapted from transformers.models.gemma3.modeling_gemma3
1087
+ def token_type_ids_mask_function(
1088
+ token_type_ids: Optional[torch.Tensor] = None,
1089
+ ) -> Optional[Callable]:
1090
+ """
1091
+ Builds a mask function that lets image tokens (token_type_ids == 1) attend to each other bidirectionally;
1092
+ it is combined with the causal mask via `or_mask_function`.
1093
+ """
1094
+ # Do not return an additional mask in this case
1095
+ if token_type_ids is None:
1096
+ return None
1097
+
1098
+ def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
1099
+ # If it's 1 for both query and key/value, we are in an image block
1100
+ # NOTE: static cache shape goes beyond input seq length, while token_type_ids.shape[1] == input seq length
1101
+ # Since vmap doesn't support `if` statements, we work around it with `torch.where`
1102
+ safe_idx = torch.where(kv_idx < token_type_ids.shape[1], kv_idx, 0)
1103
+ token_type_ids_at_kv_idx = token_type_ids[batch_idx, safe_idx]
1104
+ token_type_ids_at_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], token_type_ids_at_kv_idx, 0)
1105
+
1106
+ is_image_block = (token_type_ids[batch_idx, q_idx] == 1) & (token_type_ids_at_kv_idx == 1)
1107
+
1108
+ # This is bidirectional attention whenever we are dealing with image tokens
1109
+ return is_image_block
1110
+
1111
+ return inner_mask
1112
+
1113
+
1114
+ class Molmo2Model(Molmo2PreTrainedModel):
1115
+ base_model_prefix = ""
1116
+ _checkpoint_conversion_mapping = {}
1117
+ # Reference: fix gemma3 grad acc #37208
1118
+ accepts_loss_kwargs = False
1119
+ config: Molmo2Config
1120
+
1121
+
1122
+ def __init__(self, config: Molmo2Config):
1123
+ super().__init__(config)
1124
+ self.transformer: Molmo2TextModel = Molmo2TextModel(config.text_config)
1125
+ self.vision_backbone: Optional[Molmo2VisionBackbone] = None
1126
+ if config.vit_config is not None and config.adapter_config is not None:
1127
+ self.vision_backbone = Molmo2VisionBackbone(config.vit_config, config.adapter_config)
1128
+
1129
+ # Initialize weights and apply final processing
1130
+ self.post_init()
1131
+
1132
+ def get_input_embeddings(self) -> torch.nn.Module:
1133
+ return self.transformer.wte
1134
+
1135
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
1136
+ self.transformer.wte = value
1137
+
1138
+ def set_decoder(self, decoder):
1139
+ self.transformer = decoder
1140
+
1141
+ def get_decoder(self):
1142
+ return self.transformer
1143
+
1144
+ @property
1145
+ def device(self) -> torch.device:
1146
+ return self.transformer.ln_f.weight.device
1147
+
1148
+ def build_batched_images(
1149
+ self,
1150
+ input_ids: torch.LongTensor,
1151
+ pixel_values: torch.Tensor,
1152
+ image_token_pooling: torch.Tensor,
1153
+ image_grids: torch.Tensor,
1154
+ image_num_crops: torch.Tensor,
1155
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1156
+ # 1) Count the number of images in each example
1157
+ raw_counts = (input_ids == self.config.image_end_token_id).sum(1) # [N]
1158
+ # Each image is represented by a global view and a high-res view,
1159
+ # so we divide by 2 to get the number of images
1160
+ counts = raw_counts // 2
1161
+ N = counts.size(0)
1162
+ device = input_ids.device
1163
+
1164
+ # Total number of images in the batch
1165
+ num_images = int(counts.sum().item())
1166
+
1167
+ # Sanity check
1168
+ assert image_grids.size(0) == num_images, \
1169
+ f"Expected {num_images} image grids, but got {image_grids.size(0)}"
1170
+ assert image_num_crops.size(0) == num_images, \
1171
+ f"Expected {num_images} image num crops, but got {image_num_crops.size(0)}"
1172
+
1173
+ # 1-1) Compute per-image pooled patch count from image grids
1174
+ with torch.no_grad():
1175
+ first_prod = image_grids[:, :2].prod(dim=1) # [num_images]
1176
+ second_prod = image_grids[:, 2:].prod(dim=1) # [num_images]
1177
+ num_pooled_patches_per_image = (first_prod + second_prod).to(image_num_crops.dtype) # [num_images]
1178
+
1179
+ # pixel_values: [n_crops, n_patches, pixels_per_patch]
1180
+ n_crops, n_patches, pixels_per_patch = pixel_values.shape
1181
+
1182
+ # 2) Map each image index → example index
1183
+ # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
1184
+ example_ids_for_image = torch.arange(N, device=device).repeat_interleave(counts) # [num_images]
1185
+ assert example_ids_for_image.numel() == num_images
1186
+
1187
+ # 2-1) Compute crops_per_example by summing per-image crop counts
1188
+ crops_per_example = torch.zeros(
1189
+ N, dtype=image_num_crops.dtype, device=image_num_crops.device
1190
+ )
1191
+ crops_per_example.index_add_(0, example_ids_for_image, image_num_crops) # [N]
1192
+
1193
+ # 2-2) Per-image number of patches = (crops per image) * n_patches
1194
+ patches_per_image = image_num_crops * n_patches # [num_images]
1195
+
1196
+ # 2-3) Compute per-example per-image patch offsets
1197
+ counts_list = counts.tolist()
1198
+ index_offset_per_example_list = []
1199
+ offset_img = 0
1200
+ for c in counts_list:
1201
+ per_img_patches = patches_per_image[offset_img:offset_img + c] # [c]
1202
+ # Offsets: [0, img0_total_patches, img0+img1_total_patches, ...]
1203
+ index_offset = [0] + per_img_patches.cumsum(0).tolist()[:-1]
1204
+ index_offset_per_example_list.append(index_offset)
1205
+ offset_img += c
1206
+
1207
+ # 2-4) Compute num_pooled_patches_per_example
1208
+ num_pooled_patches_per_example = torch.zeros(
1209
+ N, dtype=num_pooled_patches_per_image.dtype, device=num_pooled_patches_per_image.device
1210
+ )
1211
+ num_pooled_patches_per_example.index_add_(
1212
+ 0, example_ids_for_image, num_pooled_patches_per_image
1213
+ )
1214
+
1215
+ # Sanity checks
1216
+ total_crops = int(crops_per_example.sum().item())
1217
+ assert total_crops == n_crops, \
1218
+ f"Expected {total_crops} crops, but got {n_crops}"
1219
+
1220
+ total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
1221
+ assert total_num_pooled_patches == image_token_pooling.size(0), \
1222
+ f"Expected {total_num_pooled_patches} pooled patches, but got {image_token_pooling.size(0)}"
1223
+
1224
+ # 3) Build images tensor filled with -1
1225
+ M = int(crops_per_example.max().item())
1226
+ images = torch.full(
1227
+ (N, M, n_patches, pixels_per_patch),
1228
+ fill_value=-1,
1229
+ dtype=pixel_values.dtype,
1230
+ device=pixel_values.device,
1231
+ )
1232
+
1233
+ # 4) Fill images with per-example slices from pixel_values
1234
+ offset_crop = 0
1235
+ for i in range(N):
1236
+ num = int(crops_per_example[i].item())
1237
+ cur = pixel_values[offset_crop:offset_crop + num] # [num, n_patches, pixels_per_patch]
1238
+ images[i, :num] = cur
1239
+ offset_crop += num
1240
+
1241
+ # Sanity check
1242
+ assert offset_crop == n_crops
1243
+
1244
+ # 5) Build new_token_pooling tensor filled with -1
1245
+ P = int(num_pooled_patches_per_example.max().item())
1246
+ _, dim = image_token_pooling.shape
1247
+ new_token_pooling = torch.full(
1248
+ (N, P, dim),
1249
+ fill_value=-1,
1250
+ dtype=image_token_pooling.dtype,
1251
+ device=image_token_pooling.device,
1252
+ )
1253
+
1254
+ # 6) Fill token_pooling with per-example slices, adding per-image patch offsets
1255
+ patch_offset = 0
1256
+ img_offset = 0
1257
+
1258
+ for i, c in enumerate(counts_list):
1259
+ num_patches = int(num_pooled_patches_per_example[i].item())
1260
+
1261
+ # Subsequence of pooled tokens belonging to this example
1262
+ cur = image_token_pooling[patch_offset:patch_offset + num_patches].clone() # [num_patches, dim]
1263
+
1264
+ index_offset_per_example = index_offset_per_example_list[i] # length = c
1265
+ per_img_pooled = num_pooled_patches_per_image[img_offset:img_offset + c] # [c]
1266
+
1267
+ assert len(index_offset_per_example) == per_img_pooled.numel()
1268
+
1269
+ # Apply per-image offsets to the (ragged) subsequence
1270
+ offset = 0
1271
+ for j in range(c):
1272
+ index_offset = int(index_offset_per_example[j])
1273
+ n = int(per_img_pooled[j].item())
1274
+ cur_slice = cur[offset:offset + n]
1275
+
1276
+ # Apply offset across all columns
1277
+ cur[offset:offset + n] = torch.where(
1278
+ cur_slice >= 0,
1279
+ cur_slice + index_offset,
1280
+ cur_slice,
1281
+ )
1282
+ offset += n
1283
+
1284
+ new_token_pooling[i, :num_patches] = cur
1285
+
1286
+ patch_offset += num_patches
1287
+ img_offset += c
1288
+
1289
+ # Final sanity checks
1290
+ assert patch_offset == total_num_pooled_patches
1291
+ assert img_offset == num_images
1292
+
1293
+ return images, new_token_pooling
1294
+
1295
+ def build_batched_videos(
1296
+ self,
1297
+ input_ids: torch.LongTensor,
1298
+ pixel_values_videos: torch.Tensor,
1299
+ video_token_pooling: torch.Tensor,
1300
+ video_grids: torch.Tensor,
1301
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1302
+
1303
+ # 1) Count the number of videos in each example
1304
+ if self.config.use_frame_special_tokens:
1305
+ end_token_id = self.config.frame_end_token_id
1306
+ else:
1307
+ end_token_id = self.config.image_end_token_id
1308
+ counts = (input_ids == end_token_id).any(dim=1).long() # [N]
1309
+ N = counts.size(0)
1310
+ device = input_ids.device
1311
+
1312
+ # Total number of videos in the batch
1313
+ num_videos = int(counts.sum().item())
1314
+
1315
+ # Sanity check
1316
+ assert video_grids.size(0) == num_videos, \
1317
+ f"Expected {num_videos} videos, but got {video_grids.size(0)}"
1318
+
1319
+ video_num_frames = video_grids[:, 0] # [num_videos]
1320
+ num_pooled_patches_per_video = video_grids.prod(dim=1) # [num_videos]
1321
+
1322
+ # pixel_values_videos: [n_frames, n_patches, pixels_per_patch]
1323
+ n_frames, n_patches, pixels_per_patch = pixel_values_videos.shape
1324
+
1325
+ # 2) Map each video index -> example index
1326
+ # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
1327
+ example_ids_for_video = torch.arange(N, device=device).repeat_interleave(counts) # [num_videos]
1328
+ assert example_ids_for_video.numel() == num_videos
1329
+
1330
+ # 2-1) Compute frames_per_example by summing per-video frame counts
1331
+ frames_per_example = torch.zeros(
1332
+ N, dtype=video_num_frames.dtype, device=device,
1333
+ )
1334
+ frames_per_example.index_add_(0, example_ids_for_video, video_num_frames) # [N]
1335
+
1336
+ # 2-2) Compute num_pooled_patches_per_example
1337
+ num_pooled_patches_per_example = torch.zeros(
1338
+ N, dtype=num_pooled_patches_per_video.dtype, device=num_pooled_patches_per_video.device,
1339
+ )
1340
+ num_pooled_patches_per_example.index_add_(
1341
+ 0, example_ids_for_video, num_pooled_patches_per_video,
1342
+ )
1343
+
1344
+ # Sanity checks
1345
+ total_frames = int(frames_per_example.sum().item())
1346
+ assert total_frames == n_frames, \
1347
+ f"Expected {total_frames} frames, but got {n_frames}"
1348
+
1349
+ total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
1350
+ assert total_num_pooled_patches == video_token_pooling.size(0), \
1351
+ f"Expected {total_num_pooled_patches} pooled patches, but got {video_token_pooling.size(0)}"
1352
+
1353
+ # 3) Build videos tensor filled with -1
1354
+ M = int(frames_per_example.max().item())
1355
+ videos = torch.full(
1356
+ (N, M, n_patches, pixels_per_patch),
1357
+ fill_value=-1,
1358
+ dtype=pixel_values_videos.dtype,
1359
+ device=device,
1360
+ )
1361
+
1362
+ # 4) Fill videos with per-example slices from pixel_values_videos
1363
+ offset_frame = 0
1364
+ for i in range(N):
1365
+ num = int(frames_per_example[i].item())
1366
+ cur = pixel_values_videos[offset_frame:offset_frame + num] # [num, n_patches, pixels_per_patch]
1367
+ videos[i, :num] = cur
1368
+ offset_frame += num
1369
+
1370
+ # Sanity check
1371
+ assert offset_frame == n_frames
1372
+
1373
+ # 5) Build new token_pooling tensor filled with -1
1374
+ P = int(num_pooled_patches_per_example.max().item())
1375
+ _, dim = video_token_pooling.shape
1376
+ new_token_pooling = torch.full(
1377
+ (N, P, dim),
1378
+ fill_value=-1,
1379
+ dtype=video_token_pooling.dtype,
1380
+ device=video_token_pooling.device,
1381
+ )
1382
+
1383
+ # 6) Fill new token_pooling with per-example slices from video_token_pooling
1384
+ patch_offset = 0
1385
+ for i in range(N):
1386
+ num_patches = int(num_pooled_patches_per_example[i].item())
1387
+ cur = video_token_pooling[patch_offset:patch_offset + num_patches] # [num_patches, dim]
1388
+ new_token_pooling[i, :num_patches] = cur
1389
+ patch_offset += num_patches
1390
+
1391
+ # Final sanity checks
1392
+ assert patch_offset == total_num_pooled_patches
1393
+
1394
+ return videos, new_token_pooling
1395
+
1396
+ def merge_visual_inputs(
1397
+ self,
1398
+ input_ids: Optional[torch.LongTensor] = None,
1399
+ pixel_values: Optional[torch.Tensor] = None,
1400
+ image_token_pooling: Optional[torch.Tensor] = None,
1401
+ image_grids: Optional[torch.Tensor] = None,
1402
+ image_num_crops: Optional[torch.Tensor] = None,
1403
+ pixel_values_videos: Optional[torch.Tensor] = None,
1404
+ video_token_pooling: Optional[torch.Tensor] = None,
1405
+ video_grids: Optional[torch.Tensor] = None,
1406
+ ) -> tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
1407
+ if pixel_values is not None and pixel_values_videos is not None:
1408
+ raise ValueError("pixel_values and pixel_values_videos are provided at the same time")
1409
+ elif pixel_values is not None:
1410
+ assert input_ids is not None
1411
+ images, token_pooling = self.build_batched_images(
1412
+ input_ids=input_ids,
1413
+ pixel_values=pixel_values,
1414
+ image_token_pooling=image_token_pooling,
1415
+ image_grids=image_grids,
1416
+ image_num_crops=image_num_crops,
1417
+ )
1418
+ elif pixel_values_videos is not None:
1419
+ assert input_ids is not None
1420
+ images, token_pooling = self.build_batched_videos(
1421
+ input_ids=input_ids,
1422
+ pixel_values_videos=pixel_values_videos,
1423
+ video_token_pooling=video_token_pooling,
1424
+ video_grids=video_grids,
1425
+ )
1426
+ else:
1427
+ images, token_pooling = None, None
1428
+ return images, token_pooling
1429
+
1430
+ def build_input_embeddings(
1431
+ self,
1432
+ input_ids: torch.LongTensor,
1433
+ images: Optional[torch.FloatTensor] = None, # image inputs
1434
+ token_pooling: Optional[torch.LongTensor] = None,
1435
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
1436
+
1437
+ # Get embeddings of input.
1438
+ # shape: (batch_size, seq_len, d_model)
1439
+ input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
1440
+ x = self.transformer.wte(input_ids)
1441
+
1442
+ image_features: Optional[torch.FloatTensor] = None
1443
+ if images is not None:
1444
+ image_features = self.vision_backbone(images, token_pooling).to(x.device)
1445
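+ # Add the pooled image features to the embeddings of the image-patch placeholder tokens.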
+ is_image_patch = input_ids.view(-1) == self.config.image_patch_id
1446
+ assert is_image_patch.sum() == len(image_features)
1447
+ x.view(-1, x.shape[-1])[is_image_patch] += image_features
1448
+
1449
+ # shape: (batch_size, seq_len, d_model)
1450
+ x = self.transformer.emb_drop(x) # type: ignore
1451
+
1452
+ return x, image_features
1453
+
1454
+ @can_return_tuple
1455
+ def forward(
1456
+ self,
1457
+ input_ids: Optional[torch.LongTensor] = None,
1458
+ pixel_values: Optional[torch.FloatTensor] = None,
1459
+ image_token_pooling: Optional[torch.Tensor] = None,
1460
+ image_grids: Optional[torch.Tensor] = None,
1461
+ image_num_crops: Optional[torch.Tensor] = None,
1462
+ pixel_values_videos: Optional[torch.Tensor] = None,
1463
+ video_token_pooling: Optional[torch.Tensor] = None,
1464
+ video_grids: Optional[torch.Tensor] = None,
1465
+ attention_mask: Optional[torch.Tensor] = None,
1466
+ position_ids: Optional[torch.Tensor] = None,
1467
+ past_key_values: Optional[Cache] = None,
1468
+ token_type_ids: Optional[torch.LongTensor] = None,
1469
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1470
+ use_cache: Optional[bool] = None,
1471
+ output_attentions: Optional[bool] = None,
1472
+ output_hidden_states: Optional[bool] = None,
1473
+ cache_position: Optional[torch.LongTensor] = None,
1474
+ **kwargs: Unpack[TransformersKwargs],
1475
+ ) -> Union[tuple, Molmo2ModelOutputWithPast]:
1476
+
1477
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1478
+ output_hidden_states = (
1479
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1480
+ )
1481
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1482
+
1483
+ if (input_ids is None) ^ (inputs_embeds is not None):
1484
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1485
+
1486
+ images, token_pooling = self.merge_visual_inputs(
1487
+ input_ids=input_ids,
1488
+ pixel_values=pixel_values,
1489
+ image_token_pooling=image_token_pooling,
1490
+ image_grids=image_grids,
1491
+ image_num_crops=image_num_crops,
1492
+ pixel_values_videos=pixel_values_videos,
1493
+ video_token_pooling=video_token_pooling,
1494
+ video_grids=video_grids,
1495
+ )
1496
+
1497
+ if images is not None and inputs_embeds is not None:
1498
+ raise ValueError(
1499
+ "You cannot specify both images and inputs_embeds at the same time."
1500
+ )
1501
+
1502
+ if inputs_embeds is None:
1503
+ inputs_embeds, image_features = self.build_input_embeddings(
1504
+ input_ids, images, token_pooling,
1505
+ )
1506
+
1507
+ if cache_position is None:
1508
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1509
+ cache_position = torch.arange(
1510
+ past_seen_tokens,
1511
+ past_seen_tokens + inputs_embeds.shape[1],
1512
+ device=inputs_embeds.device,
1513
+ )
1514
+
1515
+ # Adapted from transformers.models.gemma3.modeling_gemma3
1516
+ # It may already have been prepared by e.g. `generate`
1517
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
1518
+ # Prepare mask arguments
1519
+ mask_kwargs = {
1520
+ "config": self.config.get_text_config(),
1521
+ "input_embeds": inputs_embeds,
1522
+ "attention_mask": attention_mask,
1523
+ "cache_position": cache_position,
1524
+ "past_key_values": past_key_values,
1525
+ "position_ids": position_ids,
1526
+ }
1527
+
1528
+ # NOTE: this `is_prefill` logic is not flawless, it fails when we're using a cache eagerly initialized
1529
+ # (e.g. compiled prefill) AND `images` are not provided. Determining prefill in that case requires
1530
+ # checking data values, which is not compile-compatible.
1531
+ is_prefill = (
1532
+ not use_cache
1533
+ or past_key_values is None
1534
+ or not past_key_values.is_initialized
1535
+ or images is not None
1536
+ )
1537
+ if token_type_ids is not None and is_prefill:
1538
+ # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
1539
+ mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
1540
+ token_type_ids.to(cache_position.device)
1541
+ )
1542
+
1543
+ # Create the mask
1544
+ causal_mask_mapping = create_causal_mask(**mask_kwargs)
1545
+
1546
+ outputs = self.transformer(
1547
+ attention_mask=causal_mask_mapping,
1548
+ position_ids=position_ids,
1549
+ past_key_values=past_key_values,
1550
+ inputs_embeds=inputs_embeds,
1551
+ use_cache=use_cache,
1552
+ output_attentions=output_attentions,
1553
+ output_hidden_states=output_hidden_states,
1554
+ cache_position=cache_position,
1555
+ **kwargs,
1556
+ )
1557
+
1558
+ return Molmo2ModelOutputWithPast(
1559
+ last_hidden_state=outputs.last_hidden_state,
1560
+ past_key_values=outputs.past_key_values,
1561
+ hidden_states=outputs.hidden_states,
1562
+ attentions=outputs.attentions,
1563
+ image_hidden_states=image_features if images is not None else None,
1564
+ )
1565
+
1566
+
1567
+ class Molmo2ForConditionalGeneration(Molmo2PreTrainedModel, GenerationMixin):
1568
+ _checkpoint_conversion_mapping = {}
1569
+ _tied_weights_keys = [] # Weights are not tied
1570
+ # Reference: fix gemma3 grad acc #37208
1571
+ accepts_loss_kwargs = False
1572
+ config: Molmo2Config
1573
+
1574
+ def __init__(self, config: Molmo2Config):
1575
+ super().__init__(config)
1576
+
1577
+ self.model = Molmo2Model(config)
1578
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1579
+ self.vocab_size = config.vocab_size
1580
+
1581
+ # Initialize weights and apply final processing
1582
+ self.post_init()
1583
+
1584
+ def get_input_embeddings(self) -> torch.nn.Module:
1585
+ return self.model.transformer.wte
1586
+
1587
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
1588
+ self.model.transformer.wte = value
1589
+
1590
+ def set_decoder(self, decoder):
1591
+ self.model.set_decoder(decoder)
1592
+
1593
+ def get_decoder(self):
1594
+ return self.model.get_decoder()
1595
+
1596
+ # Make modules available through the conditional class for BC
1597
+ @property
1598
+ def language_model(self) -> torch.nn.Module:
1599
+ return self.model.transformer
1600
+
1601
+ @property
1602
+ def vision_backbone(self) -> torch.nn.Module:
1603
+ return self.model.vision_backbone
1604
+
1605
+ @can_return_tuple
1606
+ def forward(
1607
+ self,
1608
+ input_ids: torch.LongTensor = None,
1609
+ pixel_values: Optional[torch.Tensor] = None,
1610
+ image_token_pooling: Optional[torch.Tensor] = None,
1611
+ image_grids: Optional[torch.Tensor] = None,
1612
+ image_num_crops: Optional[torch.Tensor] = None,
1613
+ pixel_values_videos: Optional[torch.Tensor] = None,
1614
+ video_token_pooling: Optional[torch.Tensor] = None,
1615
+ video_grids: Optional[torch.Tensor] = None,
1616
+ attention_mask: Optional[torch.Tensor] = None,
1617
+ position_ids: Optional[torch.LongTensor] = None,
1618
+ past_key_values: Optional[list[torch.FloatTensor]] = None,
1619
+ token_type_ids: Optional[torch.LongTensor] = None,
1620
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1621
+ labels: Optional[torch.LongTensor] = None,
1622
+ use_cache: Optional[bool] = None,
1623
+ output_attentions: Optional[bool] = None,
1624
+ output_hidden_states: Optional[bool] = None,
1625
+ cache_position: Optional[torch.LongTensor] = None,
1626
+ logits_to_keep: Union[int, torch.Tensor] = 0,
1627
+ **kwargs: Unpack[TransformersKwargs],
1628
+ ) -> Union[tuple, Molmo2CausalLMOutputWithPast]:
1629
+ r"""
1630
+ ```python
1631
+ >>> from PIL import Image
1632
+ >>> import requests
1633
+ >>> from transformers import AutoProcessor, Molmo2ForConditionalGeneration
1634
+
1635
+ >>> model = Molmo2ForConditionalGeneration.from_pretrained("...")
1636
+ >>> processor = AutoProcessor.from_pretrained("...")
1637
+
1638
+ >>> prompt = "What's the content of the image?"
1639
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
1640
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1641
+
1642
+ >>> messages = [{"role": "user", "content": [{"type": "text", "text": prompt}, {"type": "image", "image": image}]}]
1643
+
1644
+ >>> inputs = processor.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True)
1645
+
1646
+ >>> # Generate
1647
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=15)
1648
+ >>> generated_tokens = generated_ids[:, inputs['input_ids'].size(1):]
1649
+ >>> processor.post_process_image_text_to_text(generated_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1650
+ "The image shows a bustling street scene in what appears to be a Chinatown area. There's ..."
1651
+ ```"""
1652
+ outputs = self.model(
1653
+ input_ids=input_ids,
1654
+ pixel_values=pixel_values,
1655
+ image_token_pooling=image_token_pooling,
1656
+ image_grids=image_grids,
1657
+ image_num_crops=image_num_crops,
1658
+ pixel_values_videos=pixel_values_videos,
1659
+ video_token_pooling=video_token_pooling,
1660
+ video_grids=video_grids,
1661
+ attention_mask=attention_mask,
1662
+ position_ids=position_ids,
1663
+ past_key_values=past_key_values,
1664
+ token_type_ids=token_type_ids,
1665
+ inputs_embeds=inputs_embeds,
1666
+ use_cache=use_cache,
1667
+ output_attentions=output_attentions,
1668
+ output_hidden_states=output_hidden_states,
1669
+ cache_position=cache_position,
1670
+ **kwargs,
1671
+ )
1672
+
1673
+ hidden_states = outputs.last_hidden_state
1674
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1675
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1676
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
1677
+
1678
+ loss = None
1679
+ if labels is not None:
1680
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.vocab_size)
1681
+
1682
+ return Molmo2CausalLMOutputWithPast(
1683
+ loss=loss,
1684
+ logits=logits,
1685
+ past_key_values=outputs.past_key_values,
1686
+ hidden_states=outputs.hidden_states,
1687
+ attentions=outputs.attentions,
1688
+ image_hidden_states=outputs.image_hidden_states,
1689
+ )
1690
+
1691
+ def prepare_inputs_for_generation(
1692
+ self,
1693
+ input_ids: torch.LongTensor,
1694
+ past_key_values: Optional[list[torch.FloatTensor]] = None,
1695
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1696
+ pixel_values: Optional[torch.FloatTensor] = None,
1697
+ image_token_pooling: Optional[torch.Tensor] = None,
1698
+ image_grids: Optional[torch.Tensor] = None,
1699
+ image_num_crops: Optional[torch.Tensor] = None,
1700
+ pixel_values_videos: Optional[torch.Tensor] = None,
1701
+ video_token_pooling: Optional[torch.Tensor] = None,
1702
+ video_grids: Optional[torch.Tensor] = None,
1703
+ attention_mask: Optional[torch.Tensor] = None,
1704
+ token_type_ids: Optional[torch.LongTensor] = None,
1705
+ cache_position: Optional[torch.LongTensor] = None,
1706
+ logits_to_keep: Optional[Union[int, torch.Tensor]] = None,
1707
+ **kwargs,
1708
+ ):
1709
+
1710
+ model_inputs = super().prepare_inputs_for_generation(
1711
+ input_ids,
1712
+ past_key_values=past_key_values,
1713
+ inputs_embeds=inputs_embeds,
1714
+ attention_mask=attention_mask,
1715
+ cache_position=cache_position,
1716
+ logits_to_keep=logits_to_keep,
1717
+ token_type_ids=token_type_ids,
1718
+ **kwargs,
1719
+ )
1720
+
1721
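+ # Vision inputs are only forwarded on the prefill step; later decoding steps rely on the KV cache.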
+ if cache_position[0] == 0:
1722
+ model_inputs["pixel_values"] = pixel_values
1723
+ model_inputs["image_token_pooling"] = image_token_pooling
1724
+ model_inputs["image_grids"] = image_grids
1725
+ model_inputs["image_num_crops"] = image_num_crops
1726
+ model_inputs["pixel_values_videos"] = pixel_values_videos
1727
+ model_inputs["video_token_pooling"] = video_token_pooling
1728
+ model_inputs["video_grids"] = video_grids
1729
+
1730
+ return model_inputs
1731
+
1732
+ # Adapted from transformers.models.gemma3.modeling_gemma3
1733
+ @staticmethod
1734
+ def create_masks_for_generate(
1735
+ config: PretrainedConfig,
1736
+ input_embeds: torch.Tensor,
1737
+ attention_mask: Optional[torch.Tensor],
1738
+ cache_position: torch.Tensor,
1739
+ past_key_values: Optional[Cache],
1740
+ position_ids: Optional[torch.Tensor],
1741
+ token_type_ids: Optional[torch.Tensor] = None,
1742
+ **kwargs,
1743
+ ) -> dict:
1744
+ # Prepare mask arguments
1745
+ mask_kwargs = {
1746
+ "config": config.get_text_config(),
1747
+ "input_embeds": input_embeds,
1748
+ "attention_mask": attention_mask,
1749
+ "cache_position": cache_position,
1750
+ "past_key_values": past_key_values,
1751
+ "position_ids": position_ids,
1752
+ }
1753
+ # Add the token type ids mask for generate as well
1754
+ if token_type_ids is not None and input_embeds.shape[1] != 1:
1755
+ # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
1756
+ mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
1757
+ token_type_ids.to(cache_position.device)
1758
+ )
1759
+
1760
+ return create_masks_for_generate(**mask_kwargs)
1761
+
1762
+
1763
+ # Always register for multi-modal features
1764
+ AutoModelForImageTextToText.register(Molmo2Config, Molmo2ForConditionalGeneration)
modeling_molmo_point.py ADDED
@@ -0,0 +1,1927 @@
1
+ import math
2
+ import re
3
+ from copy import deepcopy
4
+ from dataclasses import dataclass
5
+ from typing import Optional, Union, Callable, Any, List, Tuple
6
+
7
+ import numpy as np
8
+ import torch
9
+ from torch import nn
10
+
11
+ from torch.nn import functional as F
12
+ from transformers import LogitsProcessorList, LogitsProcessor, AutoProcessor, ViTConfig
13
+ from transformers.image_utils import PILImageResampling
14
+
15
+ from transformers.models.auto import AutoModelForImageTextToText
16
+ from transformers.activations import ACT2FN
17
+ from transformers.configuration_utils import PretrainedConfig
18
+ from transformers.cache_utils import Cache, DynamicCache
19
+ from transformers.generation import GenerationMixin
20
+ from transformers.masking_utils import create_causal_mask, create_masks_for_generate
21
+ from transformers.modeling_flash_attention_utils import (
22
+ _flash_attention_forward,
23
+ FlashAttentionKwargs,
24
+ flash_attn_supports_top_left_mask,
25
+ )
26
+ from transformers.modeling_layers import GradientCheckpointingLayer
27
+ from transformers.modeling_outputs import (
28
+ BaseModelOutputWithPast,
29
+ )
30
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
31
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
32
+ from transformers.processing_utils import Unpack
33
+ from transformers.utils import (
34
+ ModelOutput,
35
+ TransformersKwargs,
36
+ can_return_tuple,
37
+ logging,
38
+ )
39
+
40
+ from .configuration_molmo2 import Molmo2VitConfig, Molmo2TextConfig, Molmo2AdapterConfig
41
+ from .configuration_molmo_point import MolmoPointConfig, MolmoPointAdapterConfig
42
+ from .image_processing_molmo2 import Molmo2ImagesKwargs, image_to_patches_and_grids
43
+ from .modeling_molmo2 import ImageProjectorMLP, Molmo2VisionTransformer, Molmo2RMSNorm, \
44
+ Molmo2RotaryEmbedding, Molmo2PostNormDecoderLayer, Molmo2DecoderLayer, Molmo2Attention, \
45
+ Molmo2Embedding
46
+
47
+ # FIXME remove
48
+ processor = None
49
+ def decode(ids):
50
+ global processor
51
+ if processor is None:
52
+ processor = AutoProcessor.from_pretrained(
53
+ "/weka/oe-training-default/mm-olmo/released-models-molmo2-point-0326/MolmoPoint-8B/hf-step2000", trust_remote_code=True,
54
+ padding_side="left")
55
+ return processor.post_process_image_text_to_text(ids.view(1), skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]
56
+
57
+
58
+ logger = logging.get_logger(__name__)
59
+ NO_POINTS_LABEL = 1000000
60
+
61
+
62
+ EXTRACT_POINT_TRIPLE = re.compile(r"<POINT_(\d+)> ?<POINT_(\d+)> ?<POINT_(\d+)> ?([0-9]+)")
63
+
64
+
65
+ def get_subpatch_ids(output_text, pooling, no_more_points_class):
66
+ n_patches, n_subpatches = pooling.shape[-2:]
67
+ if no_more_points_class:
68
+ n_patches += 1
69
+ for match in EXTRACT_POINT_TRIPLE.finditer(output_text):
70
+ patch_id, subpatch_num = int(match.group(1)), int(match.group(2))
71
+ subpatch_id = subpatch_num - n_patches
72
+ location_num = int(match.group(3))
73
+ location_id = location_num - n_patches - n_subpatches
74
+ example_id = int(match.group(4))
75
+ vit_patch_id = pooling[patch_id, subpatch_id]
76
+ yield vit_patch_id, location_id, example_id
77
+
78
+
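+ # Illustrative sketch (hypothetical token numbers): how the point-triple regex above behaves on
+ # generated text.
+ # >>> EXTRACT_POINT_TRIPLE.findall("<POINT_3> <POINT_12> <POINT_21> 0")
+ # [('3', '12', '21', '0')]
+ # get_subpatch_ids() then shifts group 2 by n_patches and group 3 by n_patches + n_subpatches to
+ # recover local subpatch/location indices before looking up the ViT patch in `pooling`.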
79
+ @dataclass
80
+ class ImageCache:
81
+ """Extra stuff we need to cache when doing autoregressive generation with pointing"""
82
+
83
+ patch_k: torch.FloatTensor
84
+ """K values of the image tokens"""
85
+
86
+ patch_k_mask: torch.BoolTensor
87
+ """Mask over image tokens that can be selected"""
88
+
89
+ subpatch_k: torch.FloatTensor
90
+ """K values of the ViT patches before pooling"""
91
+
92
+ token_pooling: torch.LongTensor
93
+ """token pooling array mapping image_patch_id -> ViT patches pooled for that patch"""
94
+
95
+ vit_features: torch.FloatTensor
96
+ """Features before pooling, used for building input embeddings"""
97
+
98
+ image_pos_ids: Optional[torch.LongTensor] = None
99
+ """Position ids of the image tokens if need for rotary embeddings"""
100
+
101
+ image_features0: Optional[torch.FloatTensor] = None
102
+ """"Image features, might be needed to embed new patch prediction tokens"""
103
+
104
+ flat_image_tokens_to_flat_image_features: Optional[torch.LongTensor] = None
105
+ """Cached for indexing uses"""
106
+
107
+
108
+ @dataclass
109
+ class MolmoPointCausalLMOutputWithPast(ModelOutput):
110
+ """
111
+ Base class for MolmoPoint causal language model (or autoregressive) outputs.
112
+
113
+ Args:
114
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
115
+ Language modeling loss (for next-token prediction).
116
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
117
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
118
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
119
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
120
+
121
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
122
+ `past_key_values` input) to speed up sequential decoding.
123
+ image_hidden_states (`torch.FloatTensor`, *optional*):
124
+ A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
125
+ image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
126
+ """
127
+
128
+ loss: Optional[torch.FloatTensor] = None
129
+ logits: Optional[torch.FloatTensor] = None
130
+ past_key_values: Optional[Cache] = None
131
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
132
+ attentions: Optional[tuple[torch.FloatTensor]] = None
133
+ image_hidden_states: Optional[torch.FloatTensor] = None
134
+ image_data: Optional[ImageCache] = None
135
+ patch_logits: Optional[torch.FloatTensor] = None
136
+ subpatch_logits: Optional[torch.FloatTensor] = None
137
+ location_logits: Optional[torch.FloatTensor] = None
138
+ last_predicted_patch_id: Optional[torch.LongTensor] = None
139
+
140
+
141
+ @dataclass
142
+ class MolmoPointModelOutputWithPast(BaseModelOutputWithPast):
143
+ """
144
+ Base class for Molmo2 outputs, with hidden states and attentions.
145
+
146
+ Args:
147
+ image_hidden_states (`torch.FloatTensor`, *optional*):
148
+ A `torch.FloatTensor` of size `(batch_num_patches, hidden_size)`.
149
+ image_hidden_states of the model produced by the vision backbone
150
+ """
151
+ last_hidden_state: Optional[torch.FloatTensor] = None
152
+ past_key_values: Optional[Cache] = None
153
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
154
+ attentions: Optional[tuple[torch.FloatTensor]] = None
155
+ image_hidden_states: Optional[torch.FloatTensor] = None
156
+ image_data: Optional[ImageCache] = None
157
+ patch_logits: Optional[torch.FloatTensor] = None
158
+ subpatch_logits: Optional[torch.FloatTensor] = None
159
+ location_logits: Optional[torch.FloatTensor] = None
160
+ input_ids: Optional[torch.LongTensor] = None
161
+ last_predicted_patch_id: Optional[torch.LongTensor] = None
162
+
163
+
164
+ class MolmoPointPatchRope(nn.Module):
165
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
166
+
167
+ def __init__(
168
+ self,
169
+ theta: float,
170
+ dim: int,
171
+ device: Union[str, torch.device] = None,
172
+ ):
173
+ super().__init__()
174
+ attention_factor = 1.0 # Unused in this type of RoPE
175
+ inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim))
176
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
177
+
178
+ def rotate_half(self, x: torch.Tensor) -> torch.Tensor:
179
+ B, hs = x.size()
180
+ x = x.view(B, 2, hs // 2)
181
+ x1, x2 = x.unbind(dim=-2)
182
+ return torch.cat((-x2, x1), dim=-1)
183
+
184
+ @torch.no_grad()
185
+ def forward(self, x: torch.Tensor, position_ids: torch.Tensor) -> torch.Tensor:
186
+ og_dtype = x.dtype  # preserve the caller's dtype; the rotation below runs in float32
+ inv_freq_expanded = self.inv_freq.float().to(x.device)
187
+ position_ids_expanded = position_ids.float()
188
+
189
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
190
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
191
+ x = x.float()
192
+ freqs = position_ids_expanded[:, None] * inv_freq_expanded[None, :]
193
+ emb = torch.cat((freqs, freqs), dim=-1)
194
+ cos = emb.cos()
195
+ sin = emb.sin()
196
+ out = ((x * cos) + (self.rotate_half(x) * sin))
197
+
198
+ return out.to(dtype=og_dtype)
199
+
200
+
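+ # Shape sketch (illustrative, assuming flat per-token inputs): MolmoPointPatchRope.forward takes
+ # x of shape [num_tokens, dim] together with integer position_ids of shape [num_tokens] and
+ # returns a rotated tensor of the same shape, e.g.
+ # rope = MolmoPointPatchRope(theta=10000.0, dim=64)
+ # out = rope(torch.randn(5, 64), torch.arange(5))  # -> [5, 64]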
201
+ class ViTMultiHeadDotProductAttention(nn.Module):
202
+ def __init__(
203
+ self,
204
+ hidden_size: int,
205
+ num_heads: int,
206
+ num_key_value_heads: int,
207
+ head_dim: int,
208
+ use_bias: bool = True,
209
+ input_dim: Optional[int] = None,
210
+ float32_attention: bool = True,
211
+ attention_dropout: float = 0.0,
212
+ residual_dropout: float = 0.0,
213
+ device: Union[str, torch.device] = None,
214
+ attn_implementation: str = "eager",
215
+ out_layer: bool=True
216
+ ):
217
+ super().__init__()
218
+
219
+ self.hidden_size = hidden_size
220
+ self.num_heads = num_heads
221
+ self.head_dim = head_dim
222
+ self.num_key_value_heads = num_key_value_heads
223
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
224
+ self.attn_implementation = attn_implementation
225
+ self.is_causal = False
226
+
227
+ input_dim = input_dim or hidden_size
228
+
229
+ self.wq = nn.Linear(
230
+ input_dim,
231
+ self.num_heads * self.head_dim,
232
+ bias=use_bias,
233
+ device=device,
234
+ )
235
+ self.wk = nn.Linear(
236
+ input_dim,
237
+ self.num_key_value_heads * self.head_dim,
238
+ bias=use_bias,
239
+ device=device,
240
+ )
241
+ self.wv = nn.Linear(
242
+ input_dim,
243
+ self.num_key_value_heads * self.head_dim,
244
+ bias=use_bias,
245
+ device=device,
246
+ )
247
+ if out_layer:
248
+ self.wo = nn.Linear(
249
+ self.num_heads * self.head_dim,
250
+ self.hidden_size,
251
+ )
252
+ else:
253
+ self.wo = None
254
+ self.float32_attention = float32_attention
255
+ self.attention_dropout = attention_dropout
256
+ self.residual_dropout = nn.Dropout(residual_dropout)
257
+
258
+ def _split_heads(self, hidden_states, num_heads) -> torch.Tensor:
259
+ return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim))
260
+
261
+ def _merge_heads(self, hidden_states) -> torch.Tensor:
262
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,))
263
+
264
+ def forward(
265
+ self,
266
+ inputs_q: torch.Tensor,
267
+ inputs_kv: Optional[torch.Tensor] = None,
268
+ attn_mask: Optional[torch.Tensor] = None,
269
+ ) -> torch.Tensor:
270
+
271
+ if inputs_kv is not None:
272
+ inputs_k = inputs_kv
273
+ inputs_v = inputs_kv
274
+ else:
275
+ inputs_k = inputs_q
276
+ inputs_v = inputs_q
277
+
278
+ xq, xk, xv = self.wq(inputs_q), self.wk(inputs_k), self.wv(inputs_v)
279
+
280
+ xq = self._split_heads(xq, self.num_heads)
281
+ xk = self._split_heads(xk, self.num_key_value_heads)
282
+ xv = self._split_heads(xv, self.num_key_value_heads)
283
+
284
+ if self.num_heads != self.num_key_value_heads:
285
+ xk = xk.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)
286
+ xv = xv.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)
287
+
288
+ og_dtype = xq.dtype
289
+
290
+ if self.float32_attention:
291
+ xq = xq.to(torch.float)
292
+ xk = xk.to(torch.float)
293
+
294
+ dropout_p = 0.0 if not self.training else self.attention_dropout
295
+
296
+ if self.attn_implementation == "eager":
297
+ attn_weights = torch.einsum("...qhd,...khd->...hqk", xq / math.sqrt(xq.size(-1)), xk)
298
+ attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(xq.dtype)
299
+ attn_weights = F.dropout(
300
+ attn_weights,
301
+ p=dropout_p,
302
+ training=self.training
303
+ )
304
+ attn_output = torch.einsum("...hqk,...khd->...qhd", attn_weights.to(xv.dtype), xv)
305
+
306
+ elif self.attn_implementation == "sdpa":
307
+ if not torch.is_autocast_enabled():
308
+ xv = xv.to(torch.float)
309
+
310
+ attn_output = F.scaled_dot_product_attention(
311
+ xq.transpose(1, 2).contiguous(),
312
+ xk.transpose(1, 2).contiguous(),
313
+ xv.transpose(1, 2).contiguous(),
314
+ attn_mask=attn_mask,
315
+ is_causal=False,
316
+ dropout_p=dropout_p,
317
+ ).transpose(1, 2)
318
+
319
+ elif self.attn_implementation == "flash_attention_2":
320
+ target_dtype = None
+ if xq.dtype == torch.float32:
321
+ if torch.is_autocast_enabled():
322
+ target_dtype = torch.get_autocast_gpu_dtype()
323
+ else:
324
+ target_dtype = self.wq.weight.dtype
325
+ attn_output = _flash_attention_forward(
326
+ xq,
327
+ xk,
328
+ xv,
329
+ attention_mask=attn_mask,
330
+ query_length=inputs_q.shape[1],
331
+ is_causal=False,
332
+ dropout=dropout_p,
333
+ softmax_scale=xq.shape[-1] ** -0.5,
334
+ use_top_left_mask=flash_attn_supports_top_left_mask(),
335
+ target_dtype=target_dtype,
336
+ implementation=self.attn_implementation,
337
+ )
338
+ else:
339
+ raise ValueError(f"Attention implementation {self.attn_implementation} not supported")
340
+
341
+ attn_output = attn_output.to(og_dtype)
342
+ attn_output = self._merge_heads(attn_output)
343
+ if self.wo is not None:
344
+ attn_output = self.wo(attn_output)
345
+ attn_output = self.residual_dropout(attn_output)
346
+
347
+ return attn_output
348
+
349
+
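+ # Usage sketch (illustrative sizes): the pooling attention consumes a query of shape
+ # [batch, q_len, input_dim] and keys/values of shape [batch, kv_len, input_dim] and returns
+ # [batch, q_len, hidden_size] when the output projection is used, e.g.
+ # attn = ViTMultiHeadDotProductAttention(hidden_size=1024, num_heads=16,
+ #                                        num_key_value_heads=16, head_dim=64, input_dim=2048)
+ # out = attn(torch.randn(2, 1, 2048), torch.randn(2, 144, 2048))  # -> [2, 1, 1024]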
350
+ class PointPredictor(nn.Module):
351
+ """Point predictor logic"""
352
+ # We separate this out so accelerate will co-locate all these parameters on the same device
353
+
354
+ def __init__(self, config):
355
+ super().__init__()
356
+ self.config = config
357
+ llm_dim = config.text_config.hidden_size
358
+ patch_embed_dim = config.patch_embed_dim
359
+ vit_dim = self.config.vit_config.hidden_size * len(self.config.adapter_config.vit_layers)
360
+ if self.config.layer_norm_x:
361
+ self.x_norm = Molmo2RMSNorm(llm_dim, eps=self.config.text_config.layer_norm_eps)
362
+ else:
363
+ self.x_norm = None
364
+ if self.config.token_prediction_rotary == "none":
365
+ self.patch_rotary = None
366
+ else:
367
+ theta = self.config.token_prediction_rotary_theta or self.config.llm.rope_theta
368
+ if self.config.token_prediction_rotary == "one_d":
369
+ self.patch_rotary = MolmoPointPatchRope(theta, self.config.patch_embed_dim)
370
+ else:
371
+ raise NotImplementedError()
372
+ self.patch_q = nn.Linear(llm_dim, patch_embed_dim)
373
+ self.patch_k = nn.Linear(llm_dim, patch_embed_dim)
374
+ self.subpatch_q = nn.Linear(llm_dim, patch_embed_dim)
375
+ self.subpatch_k = nn.Linear(vit_dim, patch_embed_dim)
376
+ self.add_no_point_class_embed = MolmoPointPadWithLearnedVector(patch_embed_dim)
377
+ if self.config.patch_location == "3x3":
378
+ self.subpatch_loc_k = nn.Linear(llm_dim, 9)
379
+ elif self.config.patch_location is None:
380
+ self.subpatch_loc_k = None
381
+ else:
382
+ raise NotImplementedError(f"Patch location {self.config.patch_location} not implemented")
383
+
384
+ def forward(
385
+ self,
386
+ x,
387
+ token_pooling,
388
+ is_image_token,
389
+ is_patch,
390
+ is_subpatch,
391
+ is_indexable_image_token,
392
+ vit_features,
393
+ vit_features_mask,
394
+ image_features_mask,
395
+ input_patch_ids,
396
+ last_predicted_patch_id,
397
+ image_data: ImageCache
398
+ ):
399
+ dim = self.config.text_config.hidden_size
400
+ batch_size = x.shape[0]
401
+ if self.x_norm is not None:
402
+ x_norm = self.x_norm(x)
403
+ elif self.config.norm_x:
404
+ x_norm = x / math.sqrt(dim)
405
+ else:
406
+ x_norm = x
407
+
408
+ # Build the keys, or get them from the cache
409
+ if image_data is not None:
410
+ patch_k, subpatch_k = image_data.patch_k, image_data.subpatch_k
411
+ patch_k_mask = image_data.patch_k_mask
412
+ token_pooling = image_data.token_pooling
413
+ vit_features_mask = token_pooling >= 0
414
+ image_pos_ids = image_data.image_pos_ids
415
+ else:
416
+ # Build patch keys; this takes a bit of indexing trickery since we want the keys in
417
+ # shape [batch, n_image_tokens] not [batch, sequence_length]
418
+ n_image_tokens = token_pooling.shape[1]
419
+ patch_k_flat = self.patch_k(x_norm.view(-1, dim)[is_image_token.view(-1)])
420
+ if self.patch_rotary is not None:
421
+ image_token_indices = torch.cumsum(is_indexable_image_token, dim=-1) - 1
422
+ image_pos_ids_flat = image_token_indices.view(-1)[is_image_token.view(-1)]
423
+ patch_k_flat = self.patch_rotary(patch_k_flat, image_pos_ids_flat)
424
+
425
+ # Computed for use with the query vectors
426
+ image_pos_ids = torch.zeros([batch_size, n_image_tokens], dtype=torch.long,
427
+ device=image_pos_ids_flat.device)
428
+ image_pos_ids.view(-1)[image_features_mask.view(-1)] = image_pos_ids_flat
429
+ else:
430
+ image_pos_ids = None
431
+
432
+ patch_k = torch.zeros([batch_size, n_image_tokens, patch_k_flat.shape[-1]],
433
+ dtype=x.dtype, device=x.device)
434
+ patch_k.view(-1, patch_k_flat.shape[-1])[image_features_mask.flatten()] = patch_k_flat.to(dtype=x.dtype)
435
+
436
+ patch_k_mask = image_features_mask.clone()
437
+ patch_k_mask.view(-1)[image_features_mask.view(-1)] = (
438
+ is_indexable_image_token.view(-1)[is_image_token.view(-1)])
439
+
440
+ if self.config.no_more_points_class:
441
+ patch_k = self.add_no_point_class_embed(patch_k)
442
+ patch_k_mask = F.pad(patch_k_mask, (0, 1), value=True)
443
+
444
+ subpatch_k = self.subpatch_k(vit_features)
445
+
446
+ patch_logits, subpatch_logits, location_logits = None, None, None
447
+ if image_data is not None:
448
+ # Predict patch locations, only done after pre-filling
449
+ batch_idx = torch.arange(batch_size, device=x_norm.device)
450
+ image_q = self.patch_q(x_norm)
451
+ if self.patch_rotary is not None and last_predicted_patch_id is not None:
452
+ rotate_by = image_pos_ids[batch_idx, last_predicted_patch_id]
453
+ rotate_by = torch.where(last_predicted_patch_id >= 0, rotate_by, 0)
454
+ rotate_by = rotate_by.squeeze(-1)
455
+ image_q = self.patch_rotary(
456
+ image_q.view(-1, image_q.shape[-1]),
457
+ torch.clamp(rotate_by, min=0),
458
+ ).reshape(batch_size, -1, image_q.shape[-1])
459
+
460
+ dots = torch.matmul(image_q, patch_k.transpose(1, 2)) # [batch, 1, num_images]
461
+ if self.config.norm_logits:
462
+ dots = dots / math.sqrt(dots.shape[-1])
463
+
464
+ valid = patch_k_mask[:, None, :]
465
+ patch_logits = torch.where(valid, dots, -100000000)
466
+
467
+ if torch.any(is_patch):
468
+ if x_norm.shape[1] != 1:
469
+ raise NotImplementedError()
470
+ subpatch_point_q = self.subpatch_q(x_norm.squeeze(1))
471
+ subpatch_k = subpatch_k[batch_idx, input_patch_ids.squeeze(1)]
472
+ subpatch_logits = torch.einsum("pd,pcd->pc", subpatch_point_q, subpatch_k)
473
+ if self.config.norm_logits:
474
+ subpatch_logits = subpatch_logits / math.sqrt(patch_k.shape[-1])
475
+ subpatch_mask = vit_features_mask[batch_idx, input_patch_ids.squeeze(1)]
476
+ subpatch_logits = torch.where(subpatch_mask, subpatch_logits, -100000)
477
+ subpatch_logits = subpatch_logits[:, None, :]
478
+
479
+ if torch.any(is_subpatch):
480
+ location_logits = self.subpatch_loc_k(x)
481
+
482
+ if image_data is None:
483
+ image_data = ImageCache(
484
+ patch_k=patch_k,
485
+ subpatch_k=subpatch_k,
486
+ vit_features=vit_features,
487
+ patch_k_mask=patch_k_mask,
488
+ token_pooling=token_pooling,
489
+ image_pos_ids=image_pos_ids,
490
+ )
491
+ return patch_logits, subpatch_logits, location_logits, image_data
492
+
493
+
494
+ class MolmoPointPreTrainedModel(PreTrainedModel):
495
+ config: MolmoPointConfig
496
+ base_model_prefix = "model"
497
+ supports_gradient_checkpointing = True
498
+ _no_split_modules = [
499
+ "Molmo2DecoderLayer",
500
+ "Molmo2PostNormDecoderLayer",
501
+ "Molmo2VisionBlock",
502
+ "ViTMultiHeadDotProductAttention",
503
+ "PointPredictor"
504
+ ]
505
+ _skip_keys_device_placement = "past_key_values"
506
+ _supports_flash_attn = True
507
+ _supports_sdpa = True
508
+
509
+ _can_compile_fullgraph = True
510
+ _supports_attention_backend = True
511
+ _can_record_outputs = {
512
+ "hidden_states": Molmo2DecoderLayer,
513
+ "attentions": Molmo2Attention,
514
+ }
515
+
516
+ def _init_weights(self, module):
517
+ std = self.config.initializer_range
518
+ if isinstance(module, (nn.Linear,)):
519
+ module.weight.data.normal_(mean=0.0, std=std)
520
+ if module.bias is not None:
521
+ module.bias.data.zero_()
522
+ elif isinstance(module, Molmo2Embedding):
523
+ module.embedding.data.normal_(mean=0.0, std=std)
524
+ module.new_embedding.data.normal_(mean=0.0, std=std)
525
+ elif isinstance(module, nn.Embedding):
526
+ module.weight.data.normal_(mean=0.0, std=std)
527
+ if module.padding_idx is not None:
528
+ module.weight.data[module.padding_idx].zero_()
529
+ elif isinstance(module, Molmo2RMSNorm):
530
+ module.weight.data.fill_(1.0)
531
+ elif isinstance(module, nn.LayerNorm):
532
+ module.weight.data.fill_(1.0)
533
+ if module.bias is not None:
534
+ module.bias.data.zero_()
535
+
536
+
537
+ class GeneratedTokenBounds:
538
+ """Describes what tokens id ranges are patch/subpatch/location tokens"""
539
+
540
+ def __init__(self, vocab_size, n_patches, n_subpatches, n_locations, no_more_points_class):
541
+ self.n_locations = n_locations
542
+ self.n_patches = n_patches
543
+ self.n_subpatches = n_subpatches
544
+ self.vocab_size = vocab_size
545
+
546
+ if no_more_points_class:
547
+ self.no_more_points_token_id = vocab_size + n_patches
548
+ else:
549
+ self.no_more_points_token_id = -1
550
+ self.patch_start = vocab_size
551
+ self.patch_end_without_no_more_points = vocab_size + n_patches
552
+ self.patch_end = vocab_size + n_patches + int(no_more_points_class)
553
+ self.subpatch_start = self.patch_end
554
+ self.subpatch_end = self.subpatch_start + n_subpatches
555
+ self.location_start = self.subpatch_end
556
+ self.location_end = self.subpatch_end + n_locations
557
+
558
+
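+ # Worked example (hypothetical sizes): with vocab_size=100, n_patches=10, n_subpatches=4,
+ # n_locations=9 and no_more_points_class=True, the generated-token layout is
+ #   patch tokens:    100..109  (plus the no-more-points token 110)
+ #   subpatch tokens: 111..114
+ #   location tokens: 115..123
+ # i.e. patch_end=111, subpatch_end=115, location_end=124.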
559
+ class MolmoPointLogitProcessor(LogitsProcessor):
560
+ """Force point-special tokens to be generated in a valid order"""
561
+
562
+ def __init__(self, bounds: GeneratedTokenBounds,
563
+ prevent_repeats, force_patch_sorted, force_subpatch_sorted):
564
+ self.bounds = bounds
565
+ self.prevent_repeats = prevent_repeats
566
+ self.force_patch_sorted = force_patch_sorted
567
+ self.force_subpatch_sorted = force_subpatch_sorted
568
+
569
+ def __call__(self, input_ids, scores):
570
+ b = self.bounds
571
+ is_complete_patch = (b.patch_start <= input_ids) & (input_ids < b.patch_end)
572
+ is_complete_subpatch = (b.subpatch_start <= input_ids) & (input_ids < b.subpatch_end)
573
+
574
+ if b.n_locations:
575
+ is_complete_patch[:, -2:] = False
576
+ is_complete_subpatch[:, -2:] = False
577
+ else:
578
+ is_complete_patch[:, -1] = False
579
+ is_complete_subpatch[:, -1] = False
580
+
581
+ for batch in range(len(input_ids)):
582
+ batch_input_ids = input_ids[batch]
583
+ last_token = batch_input_ids[-1]
584
+
585
+ batch_is_patch_token = is_complete_patch[batch]
586
+ last_predicted_patch_token = batch_input_ids[is_complete_patch[batch]]
587
+ if len(last_predicted_patch_token):
588
+ last_predicted_patch_token = last_predicted_patch_token[-1]
589
+ else:
590
+ last_predicted_patch_token = None
591
+
592
+ last_predicted_subpatch_token = batch_input_ids[is_complete_subpatch[batch]]
593
+ if len(last_predicted_subpatch_token):
594
+ last_predicted_subpatch_token = last_predicted_subpatch_token[-1]
595
+ else:
596
+ last_predicted_subpatch_token = None
597
+
598
+ no_more_points = torch.any(batch_input_ids == b.no_more_points_token_id)
599
+
600
+ if no_more_points:
601
+ # Cannot generate any kind of point
602
+ scores[batch, b.patch_start:b.location_end] = -float("inf")
603
+ elif last_token < b.patch_start or last_token >= b.subpatch_end:
604
+ # Cannot generate subpatch/location, but might generate a patch
605
+ scores[batch, b.subpatch_start:b.location_end] = -float("inf")
606
+
607
+ if self.force_patch_sorted and last_predicted_patch_token is not None:
608
+ # Cannot generate patches that occur before the previously predicted patch
609
+ scores[batch, b.patch_start:last_predicted_patch_token] = -float("inf")
610
+
611
+ if (
612
+ self.prevent_repeats and
613
+ self.force_subpatch_sorted and
614
+ last_predicted_subpatch_token is not None and
615
+ last_predicted_subpatch_token == (b.subpatch_end-1)
616
+ ):
617
+ # Generating `last_predicted_patch_token` would force us to generate a repeat
618
+ # since the only subpatch we can predict while keeping sorted order
619
+ # will repeat the previous point
620
+ scores[batch, last_predicted_patch_token] = -float("inf")
621
+
622
+ elif b.patch_start <= last_token < b.patch_end:
623
+ # Last token was a patch token, must select a subpatch next
624
+ scores[batch, :b.subpatch_start] = -float("inf")
625
+ scores[batch, b.subpatch_end:] = -float("inf")
626
+ if (
627
+ self.force_subpatch_sorted and
628
+ last_predicted_patch_token == last_token
629
+ ):
630
+ assert last_predicted_subpatch_token is not None
631
+ if self.prevent_repeats:
632
+ assert last_predicted_subpatch_token != b.subpatch_end-1
633
+ scores[batch, b.subpatch_start:last_predicted_subpatch_token+1] = -float("inf")
634
+ else:
635
+ scores[batch, b.subpatch_start:last_predicted_subpatch_token] = -float("inf")
636
+
637
+ elif b.n_locations and b.subpatch_start <= last_token < b.subpatch_end:
638
+ # Last token was a subpatch token, must select a location next
639
+ scores[batch, :b.location_start] = -float("inf")
640
+ scores[batch, b.location_end:] = -float("inf")
641
+ else:
642
+ raise RuntimeError("Unreachable")
643
+ return scores
644
+
645
+
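+ # Usage sketch (assumptions: `model` wraps a MolmoPointModel and `token_pooling` comes from the
+ # processor outputs; the names below are illustrative only):
+ # bounds = model.model.build_token_bounds(token_pooling)
+ # logits_processor = LogitsProcessorList([
+ #     MolmoPointLogitProcessor(bounds, prevent_repeats=True,
+ #                              force_patch_sorted=True, force_subpatch_sorted=True)
+ # ])
+ # out = model.generate(**inputs, logits_processor=logits_processor)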
646
+ @dataclass
647
+ class Molmo2TextBaseOutput(BaseModelOutputWithPast):
648
+ pre_ln_hidden_state: Optional[torch.FloatTensor] = None
649
+
650
+
651
+ class MolmoPointTextModel(PreTrainedModel):
652
+ config: Molmo2TextConfig
653
+ _no_split_modules = ["Molmo2DecoderLayer", "Molmo2PostNormDecoderLayer"]
654
+ base_model_prefix = "model"
655
+ supports_gradient_checkpointing = True
656
+ _skip_keys_device_placement = "past_key_values"
657
+ _supports_flash_attn = True
658
+ _supports_sdpa = True
659
+
660
+ _can_compile_fullgraph = True
661
+ _supports_attention_backend = True
662
+ _can_record_outputs = {
663
+ "hidden_states": Molmo2DecoderLayer,
664
+ "attentions": Molmo2Attention,
665
+ }
666
+
667
+ def __init__(self, config: Molmo2TextConfig):
668
+ super().__init__(config)
669
+ if config.additional_vocab_size is not None:
670
+ self.wte = Molmo2Embedding(
671
+ config.vocab_size,
672
+ config.additional_vocab_size,
673
+ config.hidden_size,
674
+ )
675
+ else:
676
+ self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
677
+ self.emb_drop = nn.Dropout(config.embedding_dropout)
678
+ decoder_layer = Molmo2PostNormDecoderLayer if config.norm_after else Molmo2DecoderLayer
679
+ self.blocks = nn.ModuleList(
680
+ [decoder_layer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
681
+ )
682
+ self.ln_f = Molmo2RMSNorm(config.hidden_size, eps=config.layer_norm_eps)
683
+ if config.rope_scaling_layers is not None:
684
+ self.rotary_embs = nn.ModuleDict(
685
+ {
686
+ "default": Molmo2RotaryEmbedding(config, rope_type="default"),
687
+ "scaling": Molmo2RotaryEmbedding(config),
688
+ }
689
+ )
690
+ else:
691
+ self.rotary_emb = Molmo2RotaryEmbedding(config)
692
+ self.gradient_checkpointing = False
693
+
694
+ # Initialize weights and apply final processing
695
+ self.post_init()
696
+
697
+ def get_input_embeddings(self) -> torch.nn.Module:
698
+ return self.wte
699
+
700
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
701
+ self.wte = value
702
+
703
+ @can_return_tuple
704
+ def forward(
705
+ self,
706
+ input_ids: Optional[torch.LongTensor] = None,
707
+ attention_mask: Optional[torch.Tensor] = None,
708
+ position_ids: Optional[torch.LongTensor] = None,
709
+ past_key_values: Optional[Cache] = None,
710
+ inputs_embeds: Optional[torch.FloatTensor] = None,
711
+ use_cache: Optional[bool] = None,
712
+ output_attentions: Optional[bool] = None,
713
+ output_hidden_states: Optional[bool] = None,
714
+ output_pre_ln_state: Optional[bool] = None,
715
+ cache_position: Optional[torch.LongTensor] = None,
716
+ **kwargs: Unpack[TransformersKwargs],
717
+ ) -> Molmo2TextBaseOutput:
718
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
719
+ output_hidden_states = (
720
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
721
+ )
722
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
723
+
724
+ if (input_ids is None) ^ (inputs_embeds is not None):
725
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
726
+
727
+ if self.gradient_checkpointing and self.training and use_cache:
728
+ logger.warning_once(
729
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
730
+ )
731
+ use_cache = False
732
+
733
+ if inputs_embeds is None:
734
+ input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
735
+ inputs_embeds = self.wte(input_ids)
736
+
737
+ # torch.jit.trace() doesn't support cache objects in the output
738
+ if use_cache and past_key_values is None and not torch.jit.is_tracing():
739
+ past_key_values = DynamicCache(config=self.config)
740
+
741
+ if cache_position is None:
742
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
743
+ cache_position = torch.arange(
744
+ past_seen_tokens,
745
+ past_seen_tokens + inputs_embeds.shape[1],
746
+ device=inputs_embeds.device,
747
+ )
748
+
749
+ if position_ids is None:
750
+ position_ids = cache_position.unsqueeze(0)
751
+
752
+ # It may already have been prepared by e.g. `generate`
753
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
754
+ # Prepare mask arguments
755
+ mask_kwargs = {
756
+ "config": self.config,
757
+ "input_embeds": inputs_embeds,
758
+ "attention_mask": attention_mask,
759
+ "cache_position": cache_position,
760
+ "past_key_values": past_key_values,
761
+ "position_ids": position_ids,
762
+ }
763
+
764
+ # Create the mask
765
+ causal_mask_mapping = create_causal_mask(**mask_kwargs)
766
+
767
+ hidden_states = inputs_embeds
768
+
769
+ # create position embeddings to be shared across the decoder layers
770
+ if self.config.rope_scaling_layers is not None:
771
+ position_embeddings_mapping = {
772
+ "default": self.rotary_embs["default"](hidden_states, position_ids),
773
+ "scaling": self.rotary_embs["scaling"](hidden_states, position_ids),
774
+ }
775
+ else:
776
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
777
+
778
+ # decoder layers
779
+ all_hidden_states = () if output_hidden_states else None
780
+ all_self_attns = () if output_attentions else None
781
+
782
+ for layer_idx, decoder_block in enumerate(self.blocks[: self.config.num_hidden_layers]):
783
+ if output_hidden_states:
784
+ all_hidden_states += (hidden_states,)
785
+
786
+ if self.config.rope_scaling_layers is not None:
787
+ position_embeddings_i = (
788
+ position_embeddings_mapping["scaling"]
789
+ if layer_idx in self.config.rope_scaling_layers
790
+ else position_embeddings_mapping["default"]
791
+ )
792
+ else:
793
+ position_embeddings_i = position_embeddings
794
+
795
+ layer_outputs = decoder_block(
796
+ hidden_states,
797
+ attention_mask=causal_mask_mapping,
798
+ position_ids=position_ids,
799
+ past_key_values=past_key_values,
800
+ output_attentions=output_attentions,
801
+ use_cache=use_cache,
802
+ cache_position=cache_position,
803
+ position_embeddings=position_embeddings_i,
804
+ **kwargs,
805
+ )
806
+
807
+ hidden_states = layer_outputs[0]
808
+
809
+ if output_attentions:
810
+ all_self_attns += (layer_outputs[1],)
811
+
812
+ pre_ln_state = hidden_states
813
+ hidden_states = self.ln_f(hidden_states)
814
+
815
+ # add hidden states from the last decoder layer
816
+ if output_hidden_states:
817
+ all_hidden_states += (hidden_states,)
818
+
819
+ return Molmo2TextBaseOutput(
820
+ last_hidden_state=hidden_states,
821
+ past_key_values=past_key_values,
822
+ pre_ln_hidden_state=pre_ln_state,
823
+ hidden_states=all_hidden_states,
824
+ attentions=all_self_attns,
825
+ )
826
+
827
+ # Adapted from transformers.models.gemma3.modeling_gemma3
828
+ def token_type_ids_mask_function(
829
+ token_type_ids: Optional[torch.Tensor] = None,
830
+ ) -> Optional[Callable]:
831
+ """
832
+ Returns an inner mask function that is True when both the query and the key are image tokens,
833
+ so attention over image tokens stays bidirectional during generation.
834
+ """
835
+ # Do not return an additional mask in this case
836
+ if token_type_ids is None:
837
+ return None
838
+
839
+ def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
840
+ # If it's 1 for both query and key/value, we are in an image block
841
+ # NOTE: static cache shape goes beyond input seq length, while token_type_ids.shape[1] == input seq length
842
+ # Since vmap doesn't support `if statement` we workaround it with `torch.where`
843
+ safe_idx = torch.where(kv_idx < token_type_ids.shape[1], kv_idx, 0)
844
+ token_type_ids_at_kv_idx = token_type_ids[batch_idx, safe_idx]
845
+ token_type_ids_at_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], token_type_ids_at_kv_idx, 0)
846
+
847
+ is_image_block = (token_type_ids[batch_idx, q_idx] == 1) & (token_type_ids_at_kv_idx == 1)
848
+
849
+ # This is bidirectional attention whenever we are dealing with image tokens
850
+ return is_image_block
851
+
852
+ return inner_mask
853
+
854
+
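+ # Toy example (illustrative): with token_type_ids marking image tokens as 1, the returned
+ # inner_mask is True only when both the query and the key are image tokens, which is what makes
+ # attention over image blocks bidirectional.
+ # mask_fn = token_type_ids_mask_function(torch.tensor([[0, 1, 1, 0]]))
+ # mask_fn(0, 0, torch.tensor(1), torch.tensor(2))  # tensor(True): both image tokens
+ # mask_fn(0, 0, torch.tensor(0), torch.tensor(1))  # tensor(False): the query is a text token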
855
+ class MolmoPointPadWithLearnedVector(nn.Module):
856
+ """Module that pads vector
857
+
858
+ Used to add in the no-more-points key value
859
+ """
860
+ def __init__(self, dim: int):
861
+ super().__init__()
862
+ self.dim = dim
863
+ self.vector = nn.Parameter(torch.zeros([dim]))
864
+
865
+ def reset_parameters(self):
866
+ torch.nn.init.zeros_(self.vector)
867
+
868
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
869
+ vector = torch.tile(self.vector[None, :], [x.shape[0], 1])
870
+ return torch.concatenate([x, vector[:, None, :]], dim=1)
871
+
872
+
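+ # Shape sketch (illustrative): the module appends one learned key along the sequence dimension,
+ # e.g. MolmoPointPadWithLearnedVector(dim=128)(torch.randn(2, 10, 128)) -> [2, 11, 128].
+ # This is how the optional no-more-points class gets a key in the patch-selection logits.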
873
+ class AddPosEmbed(nn.Module):
874
+
875
+ def __init__(self, in_features: int, n_pos: int) -> None:
876
+ super().__init__()
877
+ self.bias = nn.Parameter(torch.zeros([n_pos, in_features]))
878
+
879
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
880
+ return input + self.bias[None, :input.shape[-2], :]
881
+
882
+
883
+ class MolmoPointAdapter(nn.Module):
884
+ def __init__(self, config: MolmoPointAdapterConfig, vit_config: Molmo2VitConfig):
885
+ super().__init__()
886
+ self.config = config
887
+ self.n_vit_layers = len(config.vit_layers)
888
+ pool_dim = vit_config.hidden_size * self.n_vit_layers
889
+ self.norm = None
890
+ self.image_projector = ImageProjectorMLP(
891
+ config.hidden_size,
892
+ config.intermediate_size,
893
+ config.text_hidden_size,
894
+ config.hidden_act,
895
+ )
896
+ self.act = ACT2FN[config.hidden_act]
897
+ self.image_pooling_2d = ViTMultiHeadDotProductAttention(
898
+ hidden_size=config.hidden_size,
899
+ num_heads=config.num_attention_heads,
900
+ num_key_value_heads=config.num_key_value_heads,
901
+ head_dim=config.head_dim,
902
+ input_dim=pool_dim,
903
+ float32_attention=config.float32_attention,
904
+ attention_dropout=config.attention_dropout,
905
+ residual_dropout=config.residual_dropout,
906
+ attn_implementation=config._attn_implementation,
907
+ out_layer=config.attention_pooling_out_layer
908
+ )
909
+ if self.config.positional_embeddings:
910
+ self.positional_embeddings = AddPosEmbed(pool_dim, self.config.positional_embeddings)
911
+ else:
912
+ self.positional_embeddings = None
913
+
914
+ def __call__(self, to_pool, to_pool_mask):
915
+ """
916
+ to_pool: [n_to_pool, pooling_dim, vit_dim]
917
+ to_pool_mask: [n_to_pool, pooling_dim]
918
+
919
+ returns:
920
+ pooled_features: [n_to_pool, 1, llm_dim]
921
+ """
922
+ cfg = self.config
923
+
924
+ if self.config.positional_embeddings:
925
+ to_pool = self.positional_embeddings(to_pool)
926
+
927
+ if self.config.pooling_attention_mask:
928
+ attn_mask = to_pool_mask.reshape([-1, 1, 1, to_pool_mask.shape[-1]])
929
+ else:
930
+ attn_mask = None
931
+ to_pool = to_pool * to_pool_mask.float()[:, :, None]
932
+
933
+ denom = to_pool_mask.view(-1, to_pool.shape[-2]).float().sum(-1)
934
+ denom = torch.where(denom == 0, 1, denom)
935
+ query = to_pool.sum(-2, keepdim=True) / denom[:, None, None]
936
+
937
+ pooled_features = self.image_pooling_2d(query, to_pool, attn_mask=attn_mask)
938
+ pooled_features = self.image_projector(pooled_features)
939
+ return pooled_features
940
+
941
+
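+ # Pooling sketch (illustrative shapes): each pooled image token attends from a single
+ # mean-initialized query over its `pooling_dim` ViT patches, so for inputs
+ #   to_pool:      [n_to_pool, pooling_dim, vit_dim]
+ #   to_pool_mask: [n_to_pool, pooling_dim]
+ # the adapter returns projected features of shape [n_to_pool, 1, text_hidden_size], which the
+ # caller flattens before adding them to the token embeddings.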
942
+ def extract_image_points(output_text, pooling, mappings, no_more_points_class, location, image_sizes):
943
+ """Extract points from MolmoPoint image output text
944
+
945
+ return points: [n_points, 4] array of (object_id, image_num, x, y) points
946
+ """
947
+ if len(mappings) != len(image_sizes):
948
+ raise ValueError("Mapping and image sizes must have the same length")
949
+ extracted_points = []
950
+ for vit_patch_id, location_id, example_id in get_subpatch_ids(output_text, pooling, no_more_points_class):
951
+ for image_ix, (mapping, (w, h)) in enumerate(zip(mappings, image_sizes)):
952
+ patch_coords = np.argwhere(mapping == int(vit_patch_id))
953
+ if len(patch_coords) == 1:
954
+ p_y, p_x = patch_coords[0]
955
+ if location_id is not None:
956
+ loc_x = location_id // 3
957
+ loc_y = location_id % 3
958
+ p_x += (loc_x+0.5)*0.33
959
+ p_y += (loc_y+0.5)*0.33
960
+ else:
961
+ p_x += 0.5
962
+ p_y += 0.5
963
+ extracted_points.append([
964
+ example_id,
965
+ image_ix,
966
+ (p_x / mapping.shape[1]) * w,
967
+ (p_y / mapping.shape[0]) * h,
968
+ ])
969
+ break
970
+ else:
971
+ logger.error("Invalid patch id encountered")
972
+ return extracted_points
973
+
974
+
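+ # Output sketch (illustrative): each extracted row is [object_id, image_index, x_pixels, y_pixels],
+ # where x/y are the (sub)patch centres mapped back into the original image size, e.g. a point in
+ # patch column 3 of a 12-patch-wide mapping on a 600-px-wide image lands near x = (3.5 / 12) * 600.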
975
+ def extract_video_points(output_text, pooling, mapping, timestamps, no_more_points_class,
976
+ location, video_size):
977
+ """
978
+ Extract points from MolmoPoint video output text
979
+
980
+ return points: [n_points, 4] array of (object_id, timestamp, x, y) points
981
+ """
982
+ extracted_points = []
983
+ for vit_patch_id, location_id, example_id in get_subpatch_ids(output_text, pooling, no_more_points_class):
984
+ patch_coords = np.argwhere(mapping == int(vit_patch_id))
985
+ if len(patch_coords) == 1:
986
+ frame_ix, p_y, p_x = patch_coords[0]
987
+ if location_id is not None:
988
+ loc_x = location_id // 3
989
+ loc_y = location_id % 3
990
+ p_x += (loc_x+0.5)*0.33
991
+ p_y += (loc_y+0.5)*0.33
992
+ else:
993
+ p_x += 0.5
994
+ p_y += 0.5
995
+ ts = timestamps[frame_ix]
996
+ extracted_points.append([
997
+ example_id,
998
+ ts,
999
+ (p_x / mapping.shape[2]) * video_size[0],
1000
+ (p_y / mapping.shape[1]) * video_size[1]
1001
+ ])
1002
+ else:
1003
+ logger.error("Invalid patch id encountered")
1004
+ return extracted_points
1005
+
1006
+
1007
+ class MolmoPointModel(MolmoPointPreTrainedModel):
1008
+ base_model_prefix = ""
1009
+ _checkpoint_conversion_mapping = {}
1010
+ # Reference: fix gemma3 grad acc #37208
1011
+ accepts_loss_kwargs = False
1012
+ config: MolmoPointConfig
1013
+
1014
+ def __init__(self, config: MolmoPointConfig):
1015
+ super().__init__(config)
1016
+ self.transformer: MolmoPointTextModel = MolmoPointTextModel(config.text_config)
1017
+ self.patch_token_id = self.config.patch_token_id
1018
+ self.subpatch_token_id = self.config.subpatch_token_id
1019
+ self.location_token_id = self.config.location_token_id
1020
+
1021
+ vit_config = config.vit_config
1022
+ adapter_config = config.adapter_config
1023
+ self.vit_layers = []
1024
+ for layer in adapter_config.vit_layers:
1025
+ if layer >= 0:
1026
+ self.vit_layers.append(layer)
1027
+ else:
1028
+ self.vit_layers.append(layer + vit_config.num_hidden_layers)
1029
+
1030
+ last_layer_needed = max(self.vit_layers) + 1
1031
+ if last_layer_needed < vit_config.num_hidden_layers:
1032
+ new_vit_config = deepcopy(vit_config)
1033
+ new_vit_config.num_hidden_layers = last_layer_needed
1034
+ self.vit = Molmo2VisionTransformer(new_vit_config)
1035
+ else:
1036
+ self.vit = Molmo2VisionTransformer(vit_config)
1037
+
1038
+ self.connector = MolmoPointAdapter(adapter_config, vit_config)
1039
+ if self.config.embed_selected_vit_patch == "linear":
1040
+ llm_dim = config.text_config.hidden_size
1041
+ vit_dim = self.config.vit_config.hidden_size * len(self.config.adapter_config.vit_layers)
1042
+ self.build_vit_embedding = nn.Linear(vit_dim, llm_dim, bias=True)
1043
+ else:
1044
+ raise NotImplementedError(f"Embedding {self.config.embed_selected_vit_patch} not implemented")
1045
+ self.point_predictor = PointPredictor(config)
1046
+
1047
+ # Initialize weights and apply final processing
1048
+ self.post_init()
1049
+
1050
+ def build_token_bounds(self, token_pooling):
1051
+ n_patches, n_subpatches = token_pooling.shape[-2:]
1052
+ return GeneratedTokenBounds(
1053
+ vocab_size=self.config.vocab_size + self.config.text_config.additional_vocab_size,
1054
+ n_patches=n_patches,
1055
+ n_subpatches=n_subpatches,
1056
+ n_locations=9 if self.config.patch_location else 0,
1057
+ no_more_points_class=self.config.no_more_points_class,
1058
+ )
1059
+
1060
+ def get_input_embeddings(self) -> torch.nn.Module:
1061
+ return self.transformer.wte
1062
+
1063
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
1064
+ self.transformer.wte = value
1065
+
1066
+ def set_decoder(self, decoder):
1067
+ self.transformer = decoder
1068
+
1069
+ def get_decoder(self):
1070
+ return self.transformer
1071
+
1072
+ @property
1073
+ def device(self) -> torch.device:
1074
+ return self.transformer.ln_f.weight.device
1075
+
1076
+ def build_batched_images(
1077
+ self,
1078
+ input_ids: torch.LongTensor,
1079
+ pixel_values: torch.Tensor,
1080
+ image_token_pooling: torch.Tensor,
1081
+ image_grids: torch.Tensor,
1082
+ image_num_crops: torch.Tensor,
1083
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1084
+ # 1) Count the number of images in each example
1085
+ raw_counts = (input_ids == self.config.image_end_token_id).sum(1) # [N]
1086
+ # Each image is represented by a global view and a high-res view,
1087
+ # so we divide by 2 to get the number of images
1088
+ counts = raw_counts // 2
1089
+ N = counts.size(0)
1090
+ device = input_ids.device
1091
+
1092
+ # Total number of images in the batch
1093
+ num_images = int(counts.sum().item())
1094
+
1095
+ # Sanity check
1096
+ assert image_grids.size(0) == num_images, \
1097
+ f"Expected {num_images} image grids, but got {image_grids.size(0)}"
1098
+ assert image_num_crops.size(0) == num_images, \
1099
+ f"Expected {num_images} image num crops, but got {image_num_crops.size(0)}"
1100
+
1101
+ # 1-1) Compute per-image pooled patch count from image grids
1102
+ with torch.no_grad():
1103
+ first_prod = image_grids[:, :2].prod(dim=1) # [num_images]
1104
+ second_prod = image_grids[:, 2:].prod(dim=1) # [num_images]
1105
+ num_pooled_patches_per_image = (first_prod + second_prod).to(image_num_crops.dtype) # [num_images]
1106
+
1107
+ # pixel_values: [n_crops, n_patches, pixels_per_patch]
1108
+ n_crops, n_patches, pixels_per_patch = pixel_values.shape
1109
+
1110
+ # 2) Map each image index → example index
1111
+ # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
1112
+ example_ids_for_image = torch.arange(N, device=device).repeat_interleave(counts) # [num_images]
1113
+ assert example_ids_for_image.numel() == num_images
1114
+
1115
+ # 2-1) Compute crops_per_example by summing per-image crop counts
1116
+ crops_per_example = torch.zeros(
1117
+ N, dtype=image_num_crops.dtype, device=image_num_crops.device
1118
+ )
1119
+ crops_per_example.index_add_(0, example_ids_for_image, image_num_crops) # [N]
1120
+
1121
+ # 2-2) Per-image number of patches = (crops per image) * n_patches
1122
+ patches_per_image = image_num_crops * n_patches # [num_images]
1123
+
1124
+ # 2-3) Compute per-example per-image patch offsets
1125
+ counts_list = counts.tolist()
1126
+ index_offset_per_example_list = []
1127
+ offset_img = 0
1128
+ for c in counts_list:
1129
+ per_img_patches = patches_per_image[offset_img:offset_img + c] # [c]
1130
+ # Offsets: [0, img0_total_patches, img0+img1_total_patches, ...]
1131
+ index_offset = [0] + per_img_patches.cumsum(0).tolist()[:-1]
1132
+ index_offset_per_example_list.append(index_offset)
1133
+ offset_img += c
1134
+
1135
+ # 2-4) Compute num_pooled_patches_per_example
1136
+ num_pooled_patches_per_example = torch.zeros(
1137
+ N, dtype=num_pooled_patches_per_image.dtype, device=num_pooled_patches_per_image.device
1138
+ )
1139
+ num_pooled_patches_per_example.index_add_(
1140
+ 0, example_ids_for_image, num_pooled_patches_per_image
1141
+ )
1142
+
1143
+ # Sanity checks
1144
+ total_crops = int(crops_per_example.sum().item())
1145
+ assert total_crops == n_crops, \
1146
+ f"Expected {total_crops} crops, but got {n_crops}"
1147
+
1148
+ total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
1149
+ assert total_num_pooled_patches == image_token_pooling.size(0), \
1150
+ f"Expected {total_num_pooled_patches} pooled patches, but got {image_token_pooling.size(0)}"
1151
+
1152
+ # 3) Build images tensor filled with -1
1153
+ M = int(crops_per_example.max().item())
1154
+ images = torch.full(
1155
+ (N, M, n_patches, pixels_per_patch),
1156
+ fill_value=-1,
1157
+ dtype=pixel_values.dtype,
1158
+ device=pixel_values.device,
1159
+ )
1160
+
1161
+ # 4) Fill images with per-example slices from pixel_values
1162
+ offset_crop = 0
1163
+ for i in range(N):
1164
+ num = int(crops_per_example[i].item())
1165
+ cur = pixel_values[offset_crop:offset_crop + num] # [num, n_patches, pixels_per_patch]
1166
+ images[i, :num] = cur
1167
+ offset_crop += num
1168
+
1169
+ # Sanity check
1170
+ assert offset_crop == n_crops
1171
+
1172
+ # 5) Build new_token_pooling tensor filled with -1
1173
+ P = int(num_pooled_patches_per_example.max().item())
1174
+ _, dim = image_token_pooling.shape
1175
+ new_token_pooling = torch.full(
1176
+ (N, P, dim),
1177
+ fill_value=-1,
1178
+ dtype=image_token_pooling.dtype,
1179
+ device=image_token_pooling.device,
1180
+ )
1181
+
1182
+ # 6) Fill token_pooling with per-example slices, adding per-image patch offsets
1183
+ patch_offset = 0
1184
+ img_offset = 0
1185
+
1186
+ for i, c in enumerate(counts_list):
1187
+ num_patches = int(num_pooled_patches_per_example[i].item())
1188
+
1189
+ # Subsequence of pooled tokens belonging to this example
1190
+ cur = image_token_pooling[patch_offset:patch_offset + num_patches].clone() # [num_patches, dim]
1191
+
1192
+ index_offset_per_example = index_offset_per_example_list[i] # length = c
1193
+ per_img_pooled = num_pooled_patches_per_image[img_offset:img_offset + c] # [c]
1194
+
1195
+ assert len(index_offset_per_example) == per_img_pooled.numel()
1196
+
1197
+ # Apply per-image offsets to the (ragged) subsequence
1198
+ offset = 0
1199
+ for j in range(c):
1200
+ index_offset = int(index_offset_per_example[j])
1201
+ n = int(per_img_pooled[j].item())
1202
+ cur_slice = cur[offset:offset + n]
1203
+
1204
+ # Apply offset across all columns
1205
+ cur[offset:offset + n] = torch.where(
1206
+ cur_slice >= 0,
1207
+ cur_slice + index_offset,
1208
+ cur_slice,
1209
+ )
1210
+ offset += n
1211
+
1212
+ new_token_pooling[i, :num_patches] = cur
1213
+
1214
+ patch_offset += num_patches
1215
+ img_offset += c
1216
+
1217
+ # Final sanity checks
1218
+ assert patch_offset == total_num_pooled_patches
1219
+ assert img_offset == num_images
1220
+
1221
+ return images, new_token_pooling
1222
+
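+ # Shape sketch (hypothetical numbers): for a batch with counts = [2, 1] images per example and a
+ # flat pixel_values of shape [n_crops, n_patches, pixels_per_patch], this packs the crops into
+ # images of shape [2, max_crops_per_example, n_patches, pixels_per_patch] (padded with -1) and
+ # shifts each image's pooling indices so they index into that example's concatenated crop patches.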
1223
+ def build_batched_videos(
1224
+ self,
1225
+ input_ids: torch.LongTensor,
1226
+ pixel_values_videos: torch.Tensor,
1227
+ video_token_pooling: torch.Tensor,
1228
+ video_grids: torch.Tensor,
1229
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1230
+
1231
+ # 1) Count the number of videos in each example
1232
+ if self.config.use_frame_special_tokens:
1233
+ end_token_id = self.config.frame_end_token_id
1234
+ else:
1235
+ end_token_id = self.config.image_end_token_id
1236
+ counts = (input_ids == end_token_id).any(dim=1).long() # [N]
1237
+ N = counts.size(0)
1238
+ device = input_ids.device
1239
+
1240
+ # Total number of videos in the batch
1241
+ num_videos = int(counts.sum().item())
1242
+
1243
+ # Sanity check
1244
+ assert video_grids.size(0) == num_videos, \
1245
+ f"Expected {num_videos} videos, but got {video_grids.size(0)}"
1246
+
1247
+ video_num_frames = video_grids[:, 0] # [num_videos]
1248
+ num_pooled_patches_per_video = video_grids.prod(dim=1) # [num_videos]
1249
+
1250
+ # pixel_values_videos: [n_frames, n_patches, pixels_per_patch]
1251
+ n_frames, n_patches, pixels_per_patch = pixel_values_videos.shape
1252
+
1253
+ # 2) Map each video index -> example index
1254
+ # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
1255
+ example_ids_for_video = torch.arange(N, device=device).repeat_interleave(counts) # [num_videos]
1256
+ assert example_ids_for_video.numel() == num_videos
1257
+
1258
+ # 2-1) Compute frames_per_example by summing per-video frame counts
1259
+ frames_per_example = torch.zeros(
1260
+ N, dtype=video_num_frames.dtype, device=device,
1261
+ )
1262
+ frames_per_example.index_add_(0, example_ids_for_video, video_num_frames) # [N]
1263
+
1264
+ # 2-2) Compute num_pooled_patches_per_example
1265
+ num_pooled_patches_per_example = torch.zeros(
1266
+ N, dtype=num_pooled_patches_per_video.dtype, device=num_pooled_patches_per_video.device,
1267
+ )
1268
+ num_pooled_patches_per_example.index_add_(
1269
+ 0, example_ids_for_video, num_pooled_patches_per_video,
1270
+ )
1271
+
1272
+ # Sanity checks
1273
+ total_frames = int(frames_per_example.sum().item())
1274
+ assert total_frames == n_frames, \
1275
+ f"Expected {total_frames} frames, but got {n_frames}"
1276
+
1277
+ total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
1278
+ assert total_num_pooled_patches == video_token_pooling.size(0), \
1279
+ f"Expected {total_num_pooled_patches} pooled patches, but got {video_token_pooling.size(0)}"
1280
+
1281
+ # 3) Build videos tensor filled with -1
1282
+ M = int(frames_per_example.max().item())
1283
+ videos = torch.full(
1284
+ (N, M, n_patches, pixels_per_patch),
1285
+ fill_value=-1,
1286
+ dtype=pixel_values_videos.dtype,
1287
+ device=device,
1288
+ )
1289
+
1290
+ # 4) Fill videos with per-examples slices from pixel_values_videos
1291
+ offset_frame = 0
1292
+ for i in range(N):
1293
+ num = int(frames_per_example[i].item())
1294
+ cur = pixel_values_videos[offset_frame:offset_frame + num] # [num, n_patches, pixels_per_patch]
1295
+ videos[i, :num] = cur
1296
+ offset_frame += num
1297
+
1298
+ # Sanity check
1299
+ assert offset_frame == n_frames
1300
+
1301
+ # 5) Build new token_pooling tensor filled with -1
1302
+ P = int(num_pooled_patches_per_example.max().item())
1303
+ _, dim = video_token_pooling.shape
1304
+ new_token_pooling = torch.full(
1305
+ (N, P, dim),
1306
+ fill_value=-1,
1307
+ dtype=video_token_pooling.dtype,
1308
+ device=video_token_pooling.device,
1309
+ )
1310
+
1311
+ # 6) Fill new token_pooling with per-example slices from video_token_pooling
1312
+ patch_offset = 0
1313
+ for i in range(N):
1314
+ num_patches = int(num_pooled_patches_per_example[i].item())
1315
+ cur = video_token_pooling[patch_offset:patch_offset + num_patches] # [num_patches, dim]
1316
+ new_token_pooling[i, :num_patches] = cur
1317
+ patch_offset += num_patches
1318
+
1319
+ # Final sanity checks
1320
+ assert patch_offset == total_num_pooled_patches
1321
+
1322
+ return videos, new_token_pooling
1323
+
1324
+ def merge_visual_inputs(
1325
+ self,
1326
+ input_ids: Optional[torch.LongTensor] = None,
1327
+ pixel_values: Optional[torch.Tensor] = None,
1328
+ image_token_pooling: Optional[torch.Tensor] = None,
1329
+ image_grids: Optional[torch.Tensor] = None,
1330
+ image_num_crops: Optional[torch.Tensor] = None,
1331
+ pixel_values_videos: Optional[torch.Tensor] = None,
1332
+ video_token_pooling: Optional[torch.Tensor] = None,
1333
+ video_grids: Optional[torch.Tensor] = None,
1334
+ ) -> tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
1335
+ if pixel_values is not None and pixel_values_videos is not None:
1336
+ raise ValueError("pixel_values and pixel_values_videos are provided at the same time")
1337
+ elif pixel_values is not None:
1338
+ assert input_ids is not None
1339
+ images, token_pooling = self.build_batched_images(
1340
+ input_ids=input_ids,
1341
+ pixel_values=pixel_values,
1342
+ image_token_pooling=image_token_pooling,
1343
+ image_grids=image_grids,
1344
+ image_num_crops=image_num_crops,
1345
+ )
1346
+ elif pixel_values_videos is not None:
1347
+ assert input_ids is not None
1348
+ images, token_pooling = self.build_batched_videos(
1349
+ input_ids=input_ids,
1350
+ pixel_values_videos=pixel_values_videos,
1351
+ video_token_pooling=video_token_pooling,
1352
+ video_grids=video_grids,
1353
+ )
1354
+ else:
1355
+ images, token_pooling = None, None
1356
+ return images, token_pooling
1357
+
1358
+ @can_return_tuple
1359
+ def forward(
1360
+ self,
1361
+ input_ids: Optional[torch.LongTensor] = None,
1362
+ pixel_values: Optional[torch.FloatTensor] = None,
1363
+ image_token_pooling: Optional[torch.Tensor] = None,
1364
+ image_grids: Optional[torch.Tensor] = None,
1365
+ image_num_crops: Optional[torch.Tensor] = None,
1366
+ pixel_values_videos: Optional[torch.Tensor] = None,
1367
+ video_token_pooling: Optional[torch.Tensor] = None,
1368
+ video_grids: Optional[torch.Tensor] = None,
1369
+ attention_mask: Optional[torch.Tensor] = None,
1370
+ position_ids: Optional[torch.Tensor] = None,
1371
+ past_key_values: Optional[Cache] = None,
1372
+ token_type_ids: Optional[torch.LongTensor] = None,
1373
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1374
+ use_cache: Optional[bool] = None,
1375
+ output_attentions: Optional[bool] = None,
1376
+ output_hidden_states: Optional[bool] = None,
1377
+ cache_position: Optional[torch.LongTensor] = None,
1378
+
1379
+ image_data: Optional[ImageCache] = None,
1380
+ last_predicted_patch_id: Optional[torch.LongTensor] = None,
1381
+ **kwargs: Unpack[TransformersKwargs],
1382
+ ) -> Union[tuple, MolmoPointModelOutputWithPast]:
1383
+ """
1384
+ last_point_patch_id: The patch id the last generated point pointed to
1385
+ """
1386
+
1387
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1388
+ output_hidden_states = (
1389
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1390
+ )
1391
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1392
+
1393
+ if (input_ids is None) ^ (inputs_embeds is not None):
1394
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1395
+
1396
+ images, token_pooling = self.merge_visual_inputs(
1397
+ input_ids=input_ids,
1398
+ pixel_values=pixel_values,
1399
+ image_token_pooling=image_token_pooling,
1400
+ image_grids=image_grids,
1401
+ image_num_crops=image_num_crops,
1402
+ pixel_values_videos=pixel_values_videos,
1403
+ video_token_pooling=video_token_pooling,
1404
+ video_grids=video_grids,
1405
+ )
1406
+ if inputs_embeds is not None:
1407
+ raise NotImplementedError("Custom inputs_embeds is not implemented yet")
1408
+
1409
+ input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
1410
+
1411
+ if image_data is not None:
1412
+ # Figure out where the patch/subpatch/location are and their values, and then convert
1413
+ # the input_ids back into their original special token values
1414
+ can_point = True
1415
+ bounds = self.build_token_bounds(image_data.token_pooling)
1416
+ expanded_inputs = input_ids
1417
+ is_patch = (input_ids >= bounds.patch_start) & (input_ids < bounds.patch_end_without_no_more_points)
1418
+ is_no_more_points = (input_ids == bounds.no_more_points_token_id)
1419
+ is_subpatch = (input_ids >= bounds.subpatch_start) & (input_ids < bounds.subpatch_end)
1420
+ is_location = (input_ids >= bounds.location_start) & (input_ids < bounds.location_end)
1421
+ input_patch_ids = torch.where(is_patch, input_ids - bounds.patch_start, -1)
1422
+ input_subpatch_ids = torch.where(is_subpatch, input_ids - bounds.subpatch_start, -1)
1423
+ input_ids = torch.where(is_patch | is_no_more_points, self.patch_token_id, input_ids)
1424
+ input_ids = torch.where(is_subpatch, self.subpatch_token_id, input_ids)
1425
+ input_ids = torch.where(is_location, self.location_token_id, input_ids)
1426
+ else:
1427
+ # No patch prediction during pre-filling
1428
+ input_subpatch_ids = None
1429
+ input_patch_ids = None
1430
+ is_patch = None
1431
+ is_subpatch = None
1432
+ can_point = False
1433
+
1434
+ device = input_ids.device
1435
+ x = self.transformer.wte(input_ids).to(device=device)
1436
+ batch_size, _, dim = x.shape
1437
+ batch_idx = torch.arange(batch_size, device=device)
1438
+
1439
+ vit_features_flat: Optional[torch.FloatTensor] = None
1440
+ if images is not None:
1441
+ is_indexable_image_token = input_ids == self.config.image_patch_id
1442
+ is_non_indexable_image_token = input_ids == self.config.image_non_indexable_patch_id
1443
+ is_image_token = is_indexable_image_token | is_non_indexable_image_token
1444
+
1445
+ images = images.to(device=self.device, dtype=self.dtype)
1446
+ B, T, N, D = images.shape
1447
+ images = images.view(B * T, N, D)
1448
+ vit_image_features = self.vit(images)
1449
+
1450
+ features = []
1451
+ for layer in self.vit_layers:
1452
+ features.append(vit_image_features[layer])
1453
+ vit_features = torch.cat(features, dim=-1).to(device=device)
1454
+ vit_feature_dim = vit_features.shape[-1]
1455
+
1456
+ # Gather the features that should be pooled to build patch embeddings
1457
+ vit_features = vit_features.reshape(batch_size, -1, vit_feature_dim)[batch_idx[:, None, None], torch.clip(token_pooling, 0)]
1458
+ vit_features = vit_features * (token_pooling >= 0).float()[:, :, :, None]
1459
+ vit_features_mask = token_pooling >= 0
1460
+
1461
+ # Build the sparse version which will be passed to the connector
1462
+ # Now shape [num_image_tokens_in_batch, pooling_dim, dim]
1463
+ image_features_mask = torch.any(vit_features_mask, -1)
1464
+ vit_features_flat = vit_features.reshape([-1, token_pooling.shape[-1], vit_features.shape[-1]])
1465
+ vit_features_flat = vit_features_flat[image_features_mask.view(-1)]
1466
+ vit_features_to_flat_mask = vit_features_mask.view(-1, token_pooling.shape[-1])[image_features_mask.view(-1)]
1467
+
1468
+ # Finally, apply the connector and add to input embeddings
1469
+ image_features = self.connector(vit_features_flat, vit_features_to_flat_mask).to(device=device)
1470
+ x = x.clone()
1471
+ x.view(-1, dim)[is_image_token.view(-1)] += image_features.view(-1, dim)
1472
+ else:
1473
+ is_image_token = None
1474
+ is_indexable_image_token = None
1475
+ if image_data is not None:
1476
+ # Get the features/masks from the cache
1477
+ token_pooling = image_data.token_pooling.to(device=device)
1478
+ vit_features_mask = token_pooling >= 0
1479
+ image_features_mask = torch.any(vit_features_mask, -1)
1480
+ vit_features = image_data.vit_features.to(device=device)
1481
+ else:
1482
+ vit_features = None
1483
+ vit_features_mask = None
1484
+ image_features_mask = None
1485
+
1486
+ # Embed the points
1487
+ if can_point:
1488
+ image_token_offset = image_data.flat_image_tokens_to_flat_image_features
1489
+ should_embed = (input_patch_ids >= 0) & (input_patch_ids < (bounds.patch_end - 1))  # elementwise mask; Python "and" would fail on tensors
1490
+ input_patch_ids_flat = (input_patch_ids + image_token_offset).view(-1)[should_embed.view(-1)]
1491
+ x.view(-1, dim)[is_patch.view(-1)] += image_data.image_features0.view(-1, dim)[input_patch_ids_flat]
1492
+
1493
+ if torch.any(is_subpatch):
1494
+ vit_features_flat = vit_features.reshape([-1, token_pooling.shape[-1], vit_features.shape[-1]])
1495
+ vit_features_flat = vit_features_flat[image_features_mask.view(-1)]
1496
+
1497
+ assert last_predicted_patch_id is not None, "Patch should always be generated before a subpatch"
1498
+ for_patches = (last_predicted_patch_id.view(batch_size) + image_token_offset)[input_subpatch_ids.view(batch_size) >= 0]
1499
+ vit_features_to_embed = vit_features_flat[for_patches, input_subpatch_ids]
1500
+ x.view(-1, dim)[is_subpatch.view(-1)] = self.build_vit_embedding(vit_features_to_embed).to(device=device, dtype=x.dtype)
1501
+
1502
+ # shape: (batch_size, seq_len, d_model)
1503
+ x = self.transformer.emb_drop(x) # type: ignore
1504
+
1505
+ if cache_position is None:
1506
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1507
+ cache_position = torch.arange(
1508
+ past_seen_tokens,
1509
+ past_seen_tokens + x.shape[1],  # inputs_embeds is always None here, so use the computed embeddings x
1510
+ device=x.device,
1511
+ )
1512
+
1513
+ # NOTE: this `is_prefill` logic is not flawless: it fails when we're using an eagerly initialized cache
1514
+ # (e.g. compiled prefill) AND `images` are not provided. Determining prefill in that case requires
1515
+ # checking data values, which is not compile-compatible.
1516
+ is_prefill = (
1517
+ not use_cache
1518
+ or past_key_values is None
1519
+ or not past_key_values.is_initialized
1520
+ or images is not None
1521
+ )
1522
+
1523
+ # Adapted from transformers.models.gemma3.modeling_gemma3
1524
+ # It may already have been prepared by e.g. `generate`
1525
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
1526
+ # Prepare mask arguments
1527
+ mask_kwargs = {
1528
+ "config": self.config.get_text_config(),
1529
+ "input_embeds": x,
1530
+ "attention_mask": attention_mask,
1531
+ "cache_position": cache_position,
1532
+ "past_key_values": past_key_values,
1533
+ "position_ids": position_ids,
1534
+ }
1535
+
1536
+ if token_type_ids is not None and is_prefill:
1537
+ # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
1538
+ mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
1539
+ token_type_ids.to(cache_position.device)
1540
+ )
1541
+
1542
+ # Create the mask
1543
+ causal_mask_mapping = create_causal_mask(**mask_kwargs)
1544
+
1545
+ outputs = self.transformer(
1546
+ attention_mask=causal_mask_mapping,
1547
+ position_ids=position_ids,
1548
+ past_key_values=past_key_values,
1549
+ inputs_embeds=x,
1550
+ use_cache=use_cache,
1551
+ output_attentions=output_attentions,
1552
+ output_hidden_states=output_hidden_states,
1553
+ cache_position=cache_position,
1554
+ output_pre_ln_state=True,
1555
+ **kwargs,
1556
+ )
1557
+ x = outputs.pre_ln_hidden_state
1558
+ patch_logits = None
1559
+ subpatch_logits = None
1560
+ location_logits = None
1561
+
1562
+ if images is not None or image_data is not None:
1563
+ patch_logits, subpatch_logits, location_logits, image_data = self.point_predictor(
1564
+ x,
1565
+ token_pooling,
1566
+ is_image_token,
1567
+ is_patch,
1568
+ is_subpatch,
1569
+ is_indexable_image_token,
1570
+ vit_features,
1571
+ vit_features_mask,
1572
+ image_features_mask,
1573
+ input_patch_ids,
1574
+ last_predicted_patch_id,
1575
+ image_data
1576
+ )
1577
+ if images is not None:
1578
+ # Also cache what we need to build the patch/subpatch token embeddings
1579
+ image_data.image_features0 = image_features
1580
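# Editorial note: the next three lines build an exclusive prefix sum of the per-example
# image-token counts, i.e. the starting offset of each batch element's rows in the
# batch-flattened image-feature table cached on image_data.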
+ num_image_tokens = is_image_token.sum(-1)
1581
+ image_token_offset = torch.cumsum(num_image_tokens[:-1], 0)
1582
+ image_token_offset = F.pad(image_token_offset, [1, 0])
1583
+ image_data.flat_image_tokens_to_flat_image_features = image_token_offset
1584
+
1585
+ if last_predicted_patch_id is not None:
1586
+ last_predicted_patch_id = torch.where(input_patch_ids == -1, last_predicted_patch_id, input_patch_ids)
1587
+ else:
1588
+ last_predicted_patch_id = input_patch_ids
1589
+
1590
+ return MolmoPointModelOutputWithPast(
1591
+ last_hidden_state=outputs.last_hidden_state,
1592
+ past_key_values=outputs.past_key_values,
1593
+ hidden_states=outputs.hidden_states,
1594
+ attentions=outputs.attentions,
1595
+ image_hidden_states=image_features if images is not None else None,
1596
+ image_data=image_data,
1597
+ patch_logits=patch_logits,
1598
+ subpatch_logits=subpatch_logits,
1599
+ location_logits=location_logits,
1600
+ last_predicted_patch_id=last_predicted_patch_id,
1601
+ )
1602
+
1603
+
1604
+ class ExtendedLmHead(nn.Module):
1605
+ def __init__(self, config, output_embeddings=None, new_output_embeddings=None):
1606
+ super().__init__()
1607
+ if output_embeddings is None:
1608
+ self.output_embeddings = nn.Parameter(torch.zeros([config.vocab_size, config.hidden_size]))
1609
+ self.new_output_embeddings = nn.Parameter(torch.zeros([128, config.hidden_size]))
1610
+ else:
1611
+ self.output_embeddings = output_embeddings
1612
+ self.new_output_embeddings = new_output_embeddings
1613
+
1614
+ def __call__(self, hidden_states, slice_indices=None):
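# Editorial note: the concatenated weight below has shape [vocab_size + num_new_tokens, hidden_size]
# (128 new rows by default), so the returned logits expose extra slots for the added special tokens
# on top of the original vocabulary.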
1615
+ lm_head = torch.concatenate([self.output_embeddings, self.new_output_embeddings], dim=0)
1616
+ return F.linear(hidden_states[:, slice_indices, :], lm_head.to(device=hidden_states.device))
1617
+
1618
+
1619
+ class MolmoPointForConditionalGeneration(MolmoPointPreTrainedModel, GenerationMixin):
1620
+ _checkpoint_conversion_mapping = {}
1621
+ # Reference: fix gemma3 grad acc #37208
1622
+ accepts_loss_kwargs = False
1623
+ config: MolmoPointConfig
1624
+
1625
+ def __init__(self, config: MolmoPointConfig):
1626
+ super().__init__(config)
1627
+
1628
+ self.model = MolmoPointModel(config)
1629
+ if config.text_config.tie_word_embeddings:
1630
+ assert isinstance(self.model.transformer.wte, Molmo2Embedding)
1631
+ self.lm_head = ExtendedLmHead(config, self.model.transformer.wte.embedding, self.model.transformer.wte.new_embedding)
1632
+ else:
1633
+ self.lm_head = ExtendedLmHead(config)
1634
+ self.vocab_size = config.vocab_size
1635
+
1636
+ # Initialize weights and apply final processing
1637
+ self.post_init()
1638
+
1639
+ @property
1640
+ def _tied_weights_keys(self):
1641
+ if self.config.text_config.tie_word_embeddings:
1642
+ return ["lm_head.output_embeddings", "lm_head.new_output_embeddings"]
1643
+ return []
1644
+
1645
+ def build_logit_processor_from_inputs(self, inputs) -> LogitsProcessorList:
1646
+ if inputs.get("image_token_pooling") is not None:
1647
+ pooling = inputs["image_token_pooling"]
1648
+ elif inputs.get("video_token_pooling") is not None:
1649
+ pooling = inputs["video_token_pooling"]
1650
+ else:
1651
+ return LogitsProcessorList()
1652
+ return LogitsProcessorList([self.build_logit_processor(pooling)])
1653
+
1654
+ def build_logit_processor(self, token_pooling):
1655
+ return MolmoPointLogitProcessor(
1656
+ bounds=self.model.build_token_bounds(token_pooling),
1657
+ prevent_repeats=self.config.mask_repeats in ["all", "inference"],
1658
+ force_patch_sorted=self.config.mask_patches in ["always", "inference"],
1659
+ force_subpatch_sorted=self.config.mask_subpatches in ["always", "inference"],
1660
+ )
1661
+
1662
+ def extract_image_points(self, output_text, pooling, subpatch_mapping, image_sizes):
1663
+ return extract_image_points(
1664
+ output_text, pooling, subpatch_mapping, self.config.no_more_points_class,
1665
+ self.config.patch_location, image_sizes)
1666
+
1667
+ def extract_video_points(self, output_text, pooling, subpatch_mapping, timestamps, video_size):
1668
+ return extract_video_points(
1669
+ output_text, pooling, subpatch_mapping, timestamps, self.config.no_more_points_class,
1670
+ self.config.patch_location, video_size)
1671
+
1672
+ def tie_weights(self):
1673
+ if self.config.text_config.tie_word_embeddings:
1674
+ self.lm_head.output_embeddings = self.model.transformer.wte.embedding
1675
+ self.lm_head.new_output_embeddings = self.model.transformer.wte.new_embedding
1676
+
1677
+ def get_input_embeddings(self) -> torch.nn.Module:
1678
+ return self.model.transformer.wte
1679
+
1680
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
1681
+ self.model.transformer.wte = value
1682
+
1683
+ def set_decoder(self, decoder):
1684
+ self.model.set_decoder(decoder)
1685
+
1686
+ def get_decoder(self):
1687
+ return self.model.get_decoder()
1688
+
1689
+ # Make modules available through the conditional class for BC
1690
+ @property
1691
+ def language_model(self) -> torch.nn.Module:
1692
+ return self.model.transformer
1693
+
1694
+ @property
1695
+ def vision_backbone(self) -> torch.nn.Module:
1696
+ return self.model.vision_backbone
1697
+
1698
+ @can_return_tuple
1699
+ def forward(
1700
+ self,
1701
+ input_ids: torch.LongTensor = None,
1702
+ pixel_values: Optional[torch.Tensor] = None,
1703
+ image_token_pooling: Optional[torch.Tensor] = None,
1704
+ image_grids: Optional[torch.Tensor] = None,
1705
+ image_num_crops: Optional[torch.Tensor] = None,
1706
+ pixel_values_videos: Optional[torch.Tensor] = None,
1707
+ video_token_pooling: Optional[torch.Tensor] = None,
1708
+ video_grids: Optional[torch.Tensor] = None,
1709
+ attention_mask: Optional[torch.Tensor] = None,
1710
+ position_ids: Optional[torch.LongTensor] = None,
1711
+ past_key_values: Optional[list[torch.FloatTensor]] = None,
1712
+ token_type_ids: Optional[torch.LongTensor] = None,
1713
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1714
+ labels: Optional[torch.LongTensor] = None,
1715
+ use_cache: Optional[bool] = None,
1716
+ output_attentions: Optional[bool] = None,
1717
+ output_hidden_states: Optional[bool] = None,
1718
+ cache_position: Optional[torch.LongTensor] = None,
1719
+ logits_to_keep: Union[int, torch.Tensor] = 0,
1720
+ image_data: Optional[ImageCache] = None,
1721
+ last_predicted_patch_id: Optional[torch.LongTensor] = None,
1722
+ **kwargs: Unpack[TransformersKwargs],
1723
+ ) -> Union[tuple, MolmoPointCausalLMOutputWithPast]:
1724
+ r"""
1725
+ ```python
1726
+ >>> from PIL import Image
1727
+ >>> import requests
1728
+ >>> from transformers import AutoProcessor, MolmoPointForConditionalGeneration
1729
+
1730
+ >>> model = MolmoPointForConditionalGeneration.from_pretrained("...")
1731
+ >>> processor = AutoProcessor.from_pretrained("...")
1732
+
1733
+ >>> prompt = "What's the content of the image?"
1734
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
1735
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1736
+
1737
+ >>> messages = [{"role": "user", "content": [{"type": "text", "text": prompt}, {"type": "image", "image": image}]}]
1738
+
1739
+ >>> inputs = processor.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True)
1740
+
1741
+ >>> # Generate
1742
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=15)
1743
+ >>> generated_tokens = generated_ids[:, inputs['input_ids'].size(1):]
1744
+ >>> processor.post_process_image_text_to_text(generated_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1745
+ "The image shows a bustling street scene in what appears to be a Chinatown area. There's ..."
1746
+ ```"""
1747
+ outputs: MolmoPointModelOutputWithPast = self.model(
1748
+ input_ids=input_ids,
1749
+ pixel_values=pixel_values,
1750
+ image_token_pooling=image_token_pooling,
1751
+ image_grids=image_grids,
1752
+ image_num_crops=image_num_crops,
1753
+ pixel_values_videos=pixel_values_videos,
1754
+ video_token_pooling=video_token_pooling,
1755
+ video_grids=video_grids,
1756
+ attention_mask=attention_mask,
1757
+ position_ids=position_ids,
1758
+ past_key_values=past_key_values,
1759
+ token_type_ids=token_type_ids,
1760
+ inputs_embeds=inputs_embeds,
1761
+ use_cache=use_cache,
1762
+ output_attentions=output_attentions,
1763
+ output_hidden_states=output_hidden_states,
1764
+ cache_position=cache_position,
1765
+ image_data=image_data,
1766
+ last_predicted_patch_id=last_predicted_patch_id,
1767
+ **kwargs,
1768
+ )
1769
+
1770
+ hidden_states = outputs.last_hidden_state
1771
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1772
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1773
+ logits = self.lm_head(hidden_states, slice_indices=slice_indices)
1774
+
1775
+ loss = None
1776
+ if labels is not None:
1777
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.vocab_size)
1778
+
1779
+ bs, seq, _ = logits.shape
1780
+ if image_data is not None:
1781
+ token_pooling = image_data.token_pooling
1782
+ else:
1783
+ token_pooling = video_token_pooling if video_token_pooling is not None else image_token_pooling
1784
+ n_patches, n_subpatches = token_pooling.shape[-2:]
1785
+ if self.config.no_more_points_class:
1786
+ n_patches += 1
1787
+ small_val = -100000
1788
+
1789
+ # The patch token is a bit tricky since we train the model to first select whether to
1790
+ # generate a patch token or not, and then to select the patch, but this two-stage
1791
+ # process is hard to emulate in generation frameworks
1792
+ # Our hack here is to assume that, if we generate the PATCH_TOKEN, we always select the argmax
1793
+ # patch. Then we can use PATCH_TOKEN scores as the argmax's patch scores
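# Editorial note: after the concatenation further below, each logits row has width
# vocab_size + n_patches + n_subpatches + 9 (location slots), laid out as
# [text vocabulary | per-patch scores | per-subpatch scores | location scores],
# so a standard sampler can pick either a text token or a specific patch/subpatch/location.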
1794
+ device = logits.device
1795
+ predicted_tokens = torch.argmax(logits[:, -1], dim=-1)
1796
+ patch_token_logits = torch.clone(logits[:, :, self.config.patch_token_id])
1797
+ logits[:, :, self.config.patch_token_id] = small_val
1798
+ predicted_patch = predicted_tokens == self.config.patch_token_id
1799
+ argmax_patch_logits = torch.full([bs, seq, n_patches], small_val, dtype=logits.dtype, device=device)
1800
+ if outputs.patch_logits is not None:
1801
+ selected_patches = torch.argmax(outputs.patch_logits, -1).to(device=device)
1802
+ bs, seq, n_patches = outputs.patch_logits.shape
1803
+ batch_idx = torch.arange(outputs.patch_logits.shape[0], device=device)
1804
+ seq_ix = torch.arange(outputs.patch_logits.shape[1], device=device)
1805
+ argmax_patch_logits[batch_idx.view(-1, 1, 1), seq_ix.view(1, -1, 1), selected_patches] = patch_token_logits
1806
+
1807
+ logits[:, :, self.config.subpatch_token_id] = small_val
1808
+ if outputs.subpatch_logits is not None:
1809
+ subpatch_logits = outputs.subpatch_logits
1810
+ else:
1811
+ subpatch_logits = torch.full([bs, seq, n_subpatches], small_val, dtype=logits.dtype, device=device)
1812
+
1813
+ logits[:, :, self.config.location_token_id] = small_val
1814
+ if outputs.location_logits is not None:
1815
+ location_logits = outputs.location_logits
1816
+ else:
1817
+ location_logits = torch.full([bs, seq, 9], small_val, dtype=logits.dtype, device=device)
1818
+
1819
+ logits = torch.concatenate([
1820
+ logits,
1821
+ argmax_patch_logits,
1822
+ subpatch_logits.to(device=device),
1823
+ location_logits.to(device=device)
1824
+ ], -1)
1825
+
1826
+ return MolmoPointCausalLMOutputWithPast(
1827
+ loss=loss,
1828
+ logits=logits,
1829
+ past_key_values=outputs.past_key_values,
1830
+ hidden_states=outputs.hidden_states,
1831
+ attentions=outputs.attentions,
1832
+ image_hidden_states=outputs.image_hidden_states,
1833
+ image_data=outputs.image_data,
1834
+ patch_logits=outputs.patch_logits,
1835
+ subpatch_logits=outputs.subpatch_logits,
1836
+ location_logits=outputs.location_logits,
1837
+ last_predicted_patch_id=outputs.last_predicted_patch_id,
1838
+ )
1839
+
1840
+ def prepare_inputs_for_generation(
1841
+ self,
1842
+ input_ids: torch.LongTensor,
1843
+ past_key_values: Optional[list[torch.FloatTensor]] = None,
1844
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1845
+ pixel_values: Optional[torch.FloatTensor] = None,
1846
+ image_token_pooling: Optional[torch.Tensor] = None,
1847
+ image_grids: Optional[torch.Tensor] = None,
1848
+ image_num_crops: Optional[torch.Tensor] = None,
1849
+ pixel_values_videos: Optional[torch.Tensor] = None,
1850
+ video_token_pooling: Optional[torch.Tensor] = None,
1851
+ video_grids: Optional[torch.Tensor] = None,
1852
+ attention_mask: Optional[torch.Tensor] = None,
1853
+ token_type_ids: Optional[torch.LongTensor] = None,
1854
+ cache_position: Optional[torch.LongTensor] = None,
1855
+ logits_to_keep: Optional[Union[int, torch.Tensor]] = None,
1856
+ image_data: Optional[ImageCache] = None,
1857
+ **kwargs,
1858
+ ):
1859
+ model_inputs = super().prepare_inputs_for_generation(
1860
+ input_ids,
1861
+ past_key_values=past_key_values,
1862
+ inputs_embeds=inputs_embeds,
1863
+ attention_mask=attention_mask,
1864
+ cache_position=cache_position,
1865
+ logits_to_keep=logits_to_keep,
1866
+ token_type_ids=token_type_ids,
1867
+ image_data=image_data,
1868
+ **kwargs,
1869
+ )
1870
+
1871
+ if cache_position[0] == 0:
1872
+ model_inputs["pixel_values"] = pixel_values
1873
+ model_inputs["image_token_pooling"] = image_token_pooling
1874
+ model_inputs["image_grids"] = image_grids
1875
+ model_inputs["image_num_crops"] = image_num_crops
1876
+ model_inputs["pixel_values_videos"] = pixel_values_videos
1877
+ model_inputs["video_token_pooling"] = video_token_pooling
1878
+ model_inputs["video_grids"] = video_grids
1879
+
1880
+ return model_inputs
1881
+
1882
+ def _update_model_kwargs_for_generation(
1883
+ self,
1884
+ outputs: MolmoPointModelOutputWithPast,
1885
+ model_kwargs: dict[str, Any],
1886
+ is_encoder_decoder: bool = False,
1887
+ num_new_tokens: int = 1,
1888
+ ) -> dict[str, Any]:
1889
+ args = super()._update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder, num_new_tokens)
1890
+ if outputs.image_data is not None:
1891
+ args["image_data"] = outputs.image_data
1892
+ args["last_predicted_patch_id"] = outputs.last_predicted_patch_id
1893
+ return args
1894
+
1895
+ # Adapted from transformers.models.gemma3.modeling_gemma3
1896
+ @staticmethod
1897
+ def create_masks_for_generate(
1898
+ config: PretrainedConfig,
1899
+ input_embeds: torch.Tensor,
1900
+ attention_mask: Optional[torch.Tensor],
1901
+ cache_position: torch.Tensor,
1902
+ past_key_values: Optional[Cache],
1903
+ position_ids: Optional[torch.Tensor],
1904
+ token_type_ids: Optional[torch.Tensor] = None,
1905
+ **kwargs,
1906
+ ) -> dict:
1907
+ # Prepare mask arguments
1908
+ mask_kwargs = {
1909
+ "config": config.get_text_config(),
1910
+ "input_embeds": input_embeds,
1911
+ "attention_mask": attention_mask,
1912
+ "cache_position": cache_position,
1913
+ "past_key_values": past_key_values,
1914
+ "position_ids": position_ids,
1915
+ }
1916
+ # Add the token type ids mask for generate as well
1917
+ if token_type_ids is not None and input_embeds.shape[1] != 1:
1918
+ # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
1919
+ mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
1920
+ token_type_ids.to(cache_position.device)
1921
+ )
1922
+
1923
+ return create_masks_for_generate(**mask_kwargs)
1924
+
1925
+
1926
+ # Always register for multi-modal features
1927
+ AutoModelForImageTextToText.register(MolmoPointConfig, MolmoPointForConditionalGeneration)
preprocessor_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoImageProcessor": "image_processing_molmo2.Molmo2ImageProcessor",
4
+ "AutoProcessor": "processing_molmo2.Molmo2Processor"
5
+ },
6
+ "do_convert_rgb": true,
7
+ "image_mean": [
8
+ 0.5,
9
+ 0.5,
10
+ 0.5
11
+ ],
12
+ "image_processor_type": "Molmo2ImageProcessor",
13
+ "image_std": [
14
+ 0.5,
15
+ 0.5,
16
+ 0.5
17
+ ],
18
+ "max_crops": 8,
19
+ "overlap_margins": [
20
+ 4,
21
+ 4
22
+ ],
23
+ "patch_size": 14,
24
+ "pooling_size": [
25
+ 2,
26
+ 2
27
+ ],
28
+ "processor_class": "Molmo2Processor",
29
+ "resample": 2,
30
+ "size": {
31
+ "height": 378,
32
+ "width": 378
33
+ }
34
+ }
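Editorial note on the values above (a rough reading, not taken from repository documentation):
each 378x378 crop is cut into 14-pixel patches (378 / 14 = 27 patches per side) and the patch
features are pooled 2x2 into vision tokens, i.e. roughly 13-14 tokens per side per crop depending
on how the 4-patch overlap margins are handled; max_crops caps the number of crops per image at 8,
the processor code below additionally builds a low-resolution global-crop token block, and
"resample": 2 corresponds to PIL bilinear resampling.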
processing_molmo2.py ADDED
@@ -0,0 +1,428 @@
1
+ """
2
+ Processor class for Molmo2.
3
+ """
4
+ from typing import Optional, Union
5
+ import dataclasses
6
+
7
+ import numpy as np
8
+
9
+ from transformers.image_utils import ImageInput
10
+ from transformers.video_utils import VideoInput
11
+ from transformers.processing_utils import (
12
+ Unpack,
13
+ ProcessingKwargs,
14
+ ProcessorMixin, AllKwargsForChatTemplate,
15
+ )
16
+ from transformers.feature_extraction_utils import BatchFeature
17
+ from transformers.tokenization_utils_base import TextInput, PreTokenizedInput
18
+ from transformers.utils import logging
19
+
20
+ from transformers import AutoTokenizer
21
+ from .image_processing_molmo2 import Molmo2ImagesKwargs, Molmo2ImageProcessor
22
+ from .video_processing_molmo2 import Molmo2VideoProcessorKwargs, Molmo2VideoProcessor
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ # Special tokens; these should be present in any tokenizer we use, since the preprocessor relies on them
29
+ IMAGE_PATCH_TOKEN = f"<im_patch>" # Where to insert high-res tokens
30
+ IMAGE_LOW_RES_TOKEN = f"<im_low>" # Where to insert low-res tokens
31
+ IM_START_TOKEN = f"<im_start>"
32
+ LOW_RES_IMAGE_START_TOKEN = f"<low_res_im_start>"
33
+ FRAME_START_TOKEN = f"<frame_start>"
34
+ IM_END_TOKEN = f"<im_end>"
35
+ FRAME_END_TOKEN = f"<frame_end>"
36
+ IM_COL_TOKEN = f"<im_col>"
37
+ IMAGE_PROMPT = "<|image|>"
38
+ VIDEO_PROMPT = "<|video|>"
39
+
40
+ IMAGE_TOKENS = [
41
+ IMAGE_PATCH_TOKEN,
42
+ IM_COL_TOKEN,
43
+ IM_START_TOKEN,
44
+ LOW_RES_IMAGE_START_TOKEN,
45
+ FRAME_START_TOKEN,
46
+ IM_END_TOKEN,
47
+ FRAME_END_TOKEN,
48
+ IMAGE_LOW_RES_TOKEN,
49
+ ]
50
+
51
+
52
+ class Molmo2ProcessorKwargs(ProcessingKwargs, total=False):
53
+ """Molmo2 processor kwargs"""
54
+ images_kwargs: Molmo2ImagesKwargs
55
+ videos_kwargs: Molmo2VideoProcessorKwargs
56
+ _defaults = {
57
+ "text_kwargs": {
58
+ "padding": False,
59
+ "return_mm_token_type_ids": True,
60
+ },
61
+ "videos_kwargs": {"return_metadata": True},
62
+ }
63
+
64
+
65
+ class Molmo2Processor(ProcessorMixin):
66
+ attributes = ["image_processor", "video_processor", "tokenizer"]
67
+ optional_attributes = [
68
+ "chat_template",
69
+ "time_mode",
70
+ "image_use_col_tokens",
71
+ "use_single_crop_col_tokens",
72
+ "use_single_crop_start_token",
73
+ "video_use_col_tokens",
74
+ "use_frame_special_tokens",
75
+ ]
76
+ image_processor_class = "AutoImageProcessor"
77
+ video_processor_class = "AutoVideoProcessor"
78
+ tokenizer_class = "AutoTokenizer"
79
+
80
+ def __init__(
81
+ self,
82
+ image_processor: Molmo2ImageProcessor = None,
83
+ video_processor: Molmo2VideoProcessor = None,
84
+ tokenizer: AutoTokenizer = None,
85
+ chat_template: Optional[str] = None,
86
+ image_use_col_tokens: Optional[bool] = True,
87
+ use_single_crop_col_tokens: Optional[bool] = None,
88
+ use_single_crop_start_token: Optional[bool] = True,
89
+ video_use_col_tokens: Optional[bool] = False,
90
+ use_frame_special_tokens: Optional[bool] = True,
91
+ use_low_res_token_for_global_crops: bool = False,
92
+ **kwargs
93
+ ) -> None:
94
+ super().__init__(
95
+ image_processor,
96
+ video_processor,
97
+ tokenizer,
98
+ chat_template=chat_template,
99
+ image_use_col_tokens=image_use_col_tokens,
100
+ use_single_crop_col_tokens=use_single_crop_col_tokens,
101
+ use_single_crop_start_token=use_single_crop_start_token,
102
+ video_use_col_tokens=video_use_col_tokens,
103
+ use_frame_special_tokens=use_frame_special_tokens,
104
+ )
105
+ self.image_placeholder_token = IMAGE_PROMPT
106
+ self.video_placeholder_token = VIDEO_PROMPT
107
+ self.image_token_ids = [
108
+ tokenizer.convert_tokens_to_ids(token)
109
+ for token in IMAGE_TOKENS
110
+ ]
111
+ self.use_low_res_token_for_global_crops = use_low_res_token_for_global_crops
112
+ self._patch_metadata = None
113
+
114
+ def get_image_tokens(self, image_grid: np.ndarray):
115
+ resized_h, resized_w, height, width = image_grid
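# Editorial note: image_grid appears to hold (low-res rows, low-res cols, high-res rows,
# high-res cols) in pooled-token units; the returned sequence places the low-res/global-crop
# block before the high-res crop block.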
116
+ per_row = np.full(width, IMAGE_PATCH_TOKEN)
117
+ if self.image_use_col_tokens:
118
+ per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
119
+ joint = [
120
+ [IM_START_TOKEN],
121
+ np.tile(per_row, [height]),
122
+ [IM_END_TOKEN],
123
+ ]
124
+ if self.use_low_res_token_for_global_crops:
125
+ per_row = np.full(resized_w, IMAGE_LOW_RES_TOKEN)
126
+ else:
127
+ per_row = np.full(resized_w, IMAGE_PATCH_TOKEN)
128
+ use_single_crop_col_tokens = (
129
+ self.image_use_col_tokens
130
+ if self.use_single_crop_col_tokens is None
131
+ else self.use_single_crop_col_tokens
132
+ )
133
+ image_start_token = (
134
+ LOW_RES_IMAGE_START_TOKEN
135
+ if self.use_single_crop_start_token
136
+ else IM_START_TOKEN
137
+ )
138
+ if use_single_crop_col_tokens:
139
+ per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
140
+ joint = [
141
+ [image_start_token],
142
+ np.tile(per_row, [resized_h]),
143
+ [IM_END_TOKEN],
144
+ ] + joint
145
+
146
+ return np.concatenate(joint)
147
+
148
+ def get_video_string(
149
+ self,
150
+ video_grid: np.ndarray,
151
+ timestamps: np.ndarray,
152
+ ):
153
+ if self.use_frame_special_tokens:
154
+ start_token_id = FRAME_START_TOKEN
155
+ end_token_id = FRAME_END_TOKEN
156
+ else:
157
+ start_token_id = IM_START_TOKEN
158
+ end_token_id = IM_END_TOKEN
159
+
160
+ num_frames, h, w = video_grid
161
+ video_string: str = ""
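# Editorial example of the produced string (illustrative values): for two frames at 0.0s and
# 1.5s with use_frame_special_tokens=True, the result looks like
#   "0.0 <frame_start><im_patch>...<im_patch><frame_end> 1.5 <frame_start>...<frame_end>"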
162
+ for frame_idx, frame_time in enumerate(timestamps):
163
+ # `per-frame-compact` time mode
164
+ prev_space = " " if frame_idx > 0 else ""
165
+ frame_prefix = prev_space + f"{frame_time:.1f} " # explicit whitespace before/after image tokens
166
+
167
+ video_string += frame_prefix
168
+ per_row = np.full(w, IMAGE_PATCH_TOKEN)
169
+ if self.video_use_col_tokens:
170
+ per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
171
+ extra_tokens = np.tile(per_row, [h])
172
+ video_tokens = [
173
+ [start_token_id],
174
+ extra_tokens,
175
+ [end_token_id],
176
+ ]
177
+ video_string += "".join(np.concatenate(video_tokens, 0))
178
+
179
+ return video_string
180
+
181
+ def insert_bos(
182
+ self,
183
+ input_ids: np.ndarray,
184
+ attention_mask: np.ndarray,
185
+ bos_token_id: int,
186
+ pad_token_id: int,
187
+ ):
188
+ """
189
+ Args:
190
+ input_ids: [B, S] array with left padding
191
+ attention_mask: [B, S] array (0 for pad, 1 for valid)
192
+ bos_token_id: int
193
+ pad_token_id: int
194
+ Returns:
195
+ input_ids_out: [B, S] or [B, S+1] array with bos inserted if needed
196
+ attention_mask_out: same shape as input_ids_out
197
+ """
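# Editorial example (illustrative values): with bos_token_id=1 and pad_token_id=0,
#   input_ids=[[0, 5, 6]], attention_mask=[[0, 1, 1]]
# becomes
#   input_ids=[[0, 1, 5, 6]], attention_mask=[[0, 1, 1, 1]],
# i.e. the valid span is shifted right by one and the BOS id is written at its old start.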
198
+
199
+ need_to_expand = len(input_ids.shape) == 1
200
+ if need_to_expand:
201
+ input_ids = input_ids[None, :]
202
+ attention_mask = attention_mask[None, :]
203
+
204
+ B, S = input_ids.shape
205
+
206
+ # Handle zero-length sequence
207
+ if S == 0:
208
+ new_input_ids = np.full((B, 1), bos_token_id, dtype=input_ids.dtype)
209
+ new_attention_mask = np.ones((B, 1), dtype=attention_mask.dtype)
210
+ if need_to_expand:
211
+ new_input_ids = new_input_ids[0]
212
+ new_attention_mask = new_attention_mask[0]
213
+ return new_input_ids, new_attention_mask
214
+
215
+ first_valid_index = (attention_mask == 1).argmax(axis=-1) # [B]
216
+ bos_already_present = np.all(input_ids[np.arange(B), first_valid_index] == bos_token_id)
217
+
218
+ if bos_already_present:
219
+ if need_to_expand:
220
+ input_ids = input_ids[0]
221
+ attention_mask = attention_mask[0]
222
+ return input_ids, attention_mask
223
+ else:
224
+ new_input_ids = np.full((B, S+1), pad_token_id, dtype=input_ids.dtype)
225
+ new_attention_mask = np.zeros((B, S+1), dtype=attention_mask.dtype)
226
+
227
+ src_idx = np.tile(np.arange(S), (B, 1)) # [B, S]
228
+ valid_mask = src_idx >= first_valid_index[:, None] # [B, S]
229
+ tgt_idx = src_idx + 1  # shift right
230
+ batch_idx = np.tile(np.arange(B)[:, None], (1, S)) # [B, S]
231
+
232
+ # flatten valid_positions
233
+ flat_vals = input_ids[valid_mask]
234
+ flat_batch = batch_idx[valid_mask]
235
+ flat_tgt = tgt_idx[valid_mask]
236
+
237
+ new_input_ids[flat_batch, flat_tgt] = flat_vals
238
+ new_attention_mask[flat_batch, flat_tgt] = 1
239
+
240
+ insert_pos = first_valid_index
241
+ new_input_ids[np.arange(B), insert_pos] = bos_token_id
242
+ new_attention_mask[np.arange(B), insert_pos] = 1
243
+
244
+ if need_to_expand:
245
+ new_input_ids = new_input_ids[0]
246
+ new_attention_mask = new_attention_mask[0]
247
+
248
+ return new_input_ids, new_attention_mask
249
+
250
+ def __call__(
251
+ self,
252
+ text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
253
+ images: ImageInput = None,
254
+ videos: VideoInput = None,
255
+ return_pointing_metadata: bool = False,
256
+ use_low_res_token_for_global_crops: bool = False,
257
+ **kwargs: Unpack[Molmo2ProcessorKwargs],
258
+ ) -> BatchFeature:
259
+ """
260
+
261
+ Args:
262
+ text (`str`, `list[str]`, `list[list[str]]`):
263
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
264
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
265
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
266
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
267
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
268
+ tensor. Both channels-first and channels-last formats are supported.
269
+ videos (`dict[str, Any]` or `list[dict[str, Any]]`):
270
+ The video or batch of videos to be prepared. Each video can be a dictionary with the following keys:
271
+ - `"frames"`: `np.ndarray` of shape (T, H, W, 3)
272
+ - `"timestamps"`: `np.ndarray` of shape (T,)
273
+ - `"sampled_fps"`: `float` (optional)
274
+ - `"sampling_augmentation"`: `str` (optional)
275
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
276
+ If set, will return tensors of a particular framework. Acceptable values are:
277
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
278
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
279
+ - `'np'`: Return NumPy `np.ndarray` objects.
280
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
281
+
282
+ Returns:
283
+ `BatchFeature`: A [`BatchFeature`] with the following fields:
284
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
285
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
286
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`).
287
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
288
+ - **image_token_pooling** -- Indices of the patches in `image_grids` to pool for each token in `image_tokens`.
289
+ Returned when `images` is not `None`.
290
+ - **image_grids** -- Grids of images. Returned when `images` is not `None`.
291
+ - **image_num_crops** -- Number of crops for each image. Returned when `images` is not `None`.
292
+ - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
293
+ - **video_token_pooling** -- Indices of the patches in `video_grids` to pool for each token in `video_tokens`.
294
+ Returned when `videos` is not `None`.
295
+ - **video_grids** -- Grids of videos. Returned when `videos` is not `None`.
296
+ """
297
+ output_kwargs = self._merge_kwargs(
298
+ Molmo2ProcessorKwargs,
299
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
300
+ **kwargs,
301
+ )
302
+ patch_metadata = {}
303
+ if images is not None:
304
+ image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"],
305
+ return_pointing_metadata=return_pointing_metadata)
306
+ if return_pointing_metadata:
307
+ patch_metadata["token_pooling"] = image_inputs.pop("image_token_pooling_np")
308
+ patch_metadata["subpatch_mapping"] = image_inputs.pop("subpatch_mapping")
309
+ patch_metadata["image_sizes"] = image_inputs.pop("image_sizes")
310
+ image_grids = image_inputs["image_grids"]
311
+ else:
312
+ image_inputs = {}
313
+ image_grids = None
314
+
315
+ if videos is not None:
316
+ videos_inputs = self.video_processor(
317
+ videos=videos, **output_kwargs["videos_kwargs"],
318
+ return_pointing_metadata=return_pointing_metadata
319
+ )
320
+ if return_pointing_metadata:
321
+ assert len(videos_inputs['video_metadata']) == 1
322
+ vd_metadata = videos_inputs['video_metadata'][0]
323
+ patch_metadata["token_pooling"] = videos_inputs.pop("video_token_pooling_np")
324
+ patch_metadata["subpatch_mapping"] = videos_inputs.pop("subpatch_mapping")
325
+ patch_metadata["timestamps"] = vd_metadata.timestamps
326
+ patch_metadata["video_size"] = (vd_metadata.width, vd_metadata.height)
327
+
328
+ video_grids = videos_inputs["video_grids"]
329
+ # If user has not requested video metadata, pop it
330
+ if "return_metadata" not in kwargs:
331
+ video_metadata = videos_inputs.pop("video_metadata")
332
+ else:
333
+ video_metadata = videos_inputs["video_metadata"]
334
+ else:
335
+ videos_inputs = {}
336
+ video_grids = None
337
+
338
+ if not isinstance(text, list):
339
+ text = [text]
340
+
341
+ text = text.copy() # below lines change text in-place
342
+
343
+ if image_grids is not None:
344
+ index = 0
345
+ for i in range(len(text)):
346
+ num_images = text[i].count(self.image_placeholder_token)
347
+ image_grids_i = image_grids[index:index+num_images]
348
+ for image_grid in image_grids_i:
349
+ image_tokens = self.get_image_tokens(image_grid)
350
+ image_string = "".join(image_tokens)
351
+ text[i] = text[i].replace(self.image_placeholder_token, image_string, 1)
352
+ index += num_images
353
+
354
+ if video_grids is not None:
355
+ index = 0
356
+ for i in range(len(text)):
357
+ num_videos = text[i].count(self.video_placeholder_token)
358
+ assert num_videos in {0, 1}, "At most one video is supported for now"
359
+ video_grids_i = video_grids[index:index+num_videos]
360
+ metadata_i = video_metadata[index:index+num_videos]
361
+ for video_grid, metadata in zip(video_grids_i, metadata_i):
362
+ video_string = self.get_video_string(
363
+ video_grid,
364
+ metadata.timestamps,
365
+ )
366
+ text[i] = text[i].replace(self.video_placeholder_token, video_string, 1)
367
+ index += num_videos
368
+
369
+ return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
370
+ return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
371
+ text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
372
+
373
+ input_ids = text_inputs["input_ids"]
374
+ attention_mask = text_inputs["attention_mask"]
375
+
376
+ input_ids = np.array(input_ids)
377
+ attention_mask = np.array(attention_mask)
378
+
379
+ bos = self.tokenizer.bos_token_id or self.tokenizer.eos_token_id
380
+ input_ids, attention_mask = self.insert_bos(
381
+ input_ids, attention_mask, bos, self.tokenizer.pad_token_id
382
+ )
383
+
384
+ if return_mm_token_type_ids:
385
+ image_tokens = np.array(self.image_token_ids).astype(input_ids.dtype)
386
+ token_type_ids = np.any(input_ids[:, :, None] == image_tokens[None, None, :], axis=-1)
387
+ text_inputs["token_type_ids"] = token_type_ids.tolist()
388
+
389
+ text_inputs["input_ids"] = input_ids.tolist()
390
+ text_inputs["attention_mask"] = attention_mask.tolist()
391
+
392
+ features = BatchFeature(
393
+ data={**text_inputs, **image_inputs, **videos_inputs},
394
+ tensor_type=return_tensors,
395
+ )
396
+ if return_pointing_metadata:
397
+ features["metadata"] = patch_metadata
398
+ return features
399
+
400
+ def post_process_image_text_to_text(
401
+ self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
402
+ ):
403
+ """
404
+ Post-process the output of the model to decode the text.
405
+
406
+ Args:
407
+ generated_outputs (`torch.Tensor` or `np.ndarray`):
408
+ The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
409
+ or `(sequence_length,)`.
410
+ skip_special_tokens (`bool`, *optional*, defaults to `True`):
411
+ Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
412
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
413
+ Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
414
+ **kwargs:
415
+ Additional arguments to be passed to the tokenizer's `batch_decode method`.
416
+
417
+ Returns:
418
+ `list[str]`: The decoded text.
419
+ """
420
+ return self.tokenizer.batch_decode(
421
+ generated_outputs,
422
+ skip_special_tokens=skip_special_tokens,
423
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
424
+ **kwargs,
425
+ )
426
+
427
+
428
+ Molmo2Processor.register_for_auto_class()
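Editorial sketch (not part of the committed file; the repo id is left elided as in the docstrings
above): once these files are in a Hub repo, the processor can be loaded with trust_remote_code and
called with the literal "<|image|>" placeholder, which __call__ expands into the image-token string
built by get_image_tokens.

    from PIL import Image
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("...", trust_remote_code=True)  # hypothetical repo id
    image = Image.open("example.jpg")  # any RGB image
    inputs = processor(text="<|image|> Point to the dog.", images=[image], return_tensors="pt")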
processing_molmo_point.py ADDED
@@ -0,0 +1,410 @@
1
+ """
2
+ Processor class for MolmoPoint.
3
+ """
4
+ from typing import Optional, Union
5
+ import dataclasses
6
+
7
+ import numpy as np
8
+
9
+ from transformers.image_utils import ImageInput
10
+ from transformers.video_utils import VideoInput
11
+ from transformers.processing_utils import (
12
+ Unpack,
13
+ ProcessingKwargs,
14
+ ProcessorMixin,
15
+ )
16
+ from transformers.feature_extraction_utils import BatchFeature
17
+ from transformers.tokenization_utils_base import TextInput, PreTokenizedInput
18
+ from transformers.utils import logging
19
+
20
+ from transformers import AutoTokenizer
21
+ from .image_processing_molmo2 import Molmo2ImagesKwargs, Molmo2ImageProcessor
22
+ from .video_processing_molmo2 import Molmo2VideoProcessorKwargs, Molmo2VideoProcessor
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ # Special tokens, these should be present in any tokenizer we use since the preprocessor uses them
29
+ IMAGE_PATCH_TOKEN = f"<im_patch>" # Where to insert high-res tokens
30
+ IMAGE_LOW_RES_TOKEN = f"<im_low>" # Where to insert low-res tokens
31
+ IM_START_TOKEN = f"<im_start>"
32
+ LOW_RES_IMAGE_START_TOKEN = f"<low_res_im_start>"
33
+ FRAME_START_TOKEN = f"<frame_start>"
34
+ IM_END_TOKEN = f"<im_end>"
35
+ FRAME_END_TOKEN= f"<frame_end>"
36
+ IM_COL_TOKEN = f"<im_col>"
37
+ IMAGE_PROMPT = "<|image|>"
38
+ VIDEO_PROMPT = "<|video|>"
39
+
40
+ IMAGE_TOKENS = [
41
+ IMAGE_PATCH_TOKEN,
42
+ IM_COL_TOKEN,
43
+ IM_START_TOKEN,
44
+ LOW_RES_IMAGE_START_TOKEN,
45
+ FRAME_START_TOKEN,
46
+ IM_END_TOKEN,
47
+ FRAME_END_TOKEN,
48
+ IMAGE_LOW_RES_TOKEN,
49
+ ]
50
+
51
+
52
+ class MolmoPointProcessorKwargs(ProcessingKwargs, total=False):
53
+ """MolmoPoint processor kwargs"""
54
+ images_kwargs: Molmo2ImagesKwargs
55
+ videos_kwargs: Molmo2VideoProcessorKwargs
56
+ _defaults = {
57
+ "text_kwargs": {
58
+ "padding": False,
59
+ "return_mm_token_type_ids": True,
60
+ },
61
+ "videos_kwargs": {"return_metadata": True},
62
+ }
63
+
64
+
65
+ class MolmoPointProcessor(ProcessorMixin):
66
+ attributes = ["image_processor", "video_processor", "tokenizer"]
67
+ optional_attributes = [
68
+ "chat_template",
69
+ "time_mode",
70
+ "image_use_col_tokens",
71
+ "use_single_crop_col_tokens",
72
+ "use_single_crop_start_token",
73
+ "video_use_col_tokens",
74
+ "use_frame_special_tokens",
75
+ ]
76
+ image_processor_class = "AutoImageProcessor"
77
+ video_processor_class = "AutoVideoProcessor"
78
+ tokenizer_class = "AutoTokenizer"
79
+
80
+ def __init__(
81
+ self,
82
+ image_processor: Molmo2ImageProcessor = None,
83
+ video_processor: Molmo2VideoProcessor = None,
84
+ tokenizer: AutoTokenizer = None,
85
+ chat_template: Optional[str] = None,
86
+ image_use_col_tokens: Optional[bool] = True,
87
+ use_single_crop_col_tokens: Optional[bool] = None,
88
+ use_single_crop_start_token: Optional[bool] = True,
89
+ video_use_col_tokens: Optional[bool] = False,
90
+ use_frame_special_tokens: Optional[bool] = True,
91
+ **kwargs
92
+ ) -> None:
93
+ super().__init__(
94
+ image_processor,
95
+ video_processor,
96
+ tokenizer,
97
+ chat_template=chat_template,
98
+ image_use_col_tokens=image_use_col_tokens,
99
+ use_single_crop_col_tokens=use_single_crop_col_tokens,
100
+ use_single_crop_start_token=use_single_crop_start_token,
101
+ video_use_col_tokens=video_use_col_tokens,
102
+ use_frame_special_tokens=use_frame_special_tokens,
103
+ )
104
+
105
+ self.image_placeholder_token = IMAGE_PROMPT
106
+ self.video_placeholder_token = VIDEO_PROMPT
107
+ self.image_token_ids = [
108
+ tokenizer.convert_tokens_to_ids(token)
109
+ for token in IMAGE_TOKENS
110
+ ]
111
+
112
+ def get_image_tokens(self, image_grid: np.ndarray):
113
+ resized_h, resized_w, height, width = image_grid
114
+ per_row = np.full(width, IMAGE_PATCH_TOKEN)
115
+ if self.image_use_col_tokens:
116
+ per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
117
+ joint = [
118
+ [IM_START_TOKEN],
119
+ np.tile(per_row, [height]),
120
+ [IM_END_TOKEN],
121
+ ]
122
+ per_row = np.full(resized_w, IMAGE_PATCH_TOKEN)
123
+ use_single_crop_col_tokens = (
124
+ self.image_use_col_tokens
125
+ if self.use_single_crop_col_tokens is None
126
+ else self.use_single_crop_col_tokens
127
+ )
128
+ image_start_token = (
129
+ LOW_RES_IMAGE_START_TOKEN
130
+ if self.use_single_crop_start_token
131
+ else IM_START_TOKEN
132
+ )
133
+ if use_single_crop_col_tokens:
134
+ per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
135
+ joint = [
136
+ [image_start_token],
137
+ np.tile(per_row, [resized_h]),
138
+ [IM_END_TOKEN],
139
+ ] + joint
140
+
141
+ return np.concatenate(joint)
142
+
143
+ def get_video_string(
144
+ self,
145
+ video_grid: np.ndarray,
146
+ timestamps: np.ndarray,
147
+ ):
148
+ if self.use_frame_special_tokens:
149
+ start_token_id = FRAME_START_TOKEN
150
+ end_token_id = FRAME_END_TOKEN
151
+ else:
152
+ start_token_id = IM_START_TOKEN
153
+ end_token_id = IM_END_TOKEN
154
+
155
+ num_frames, h, w = video_grid
156
+ video_string: str = ""
157
+ for frame_idx, frame_time in enumerate(timestamps):
158
+ # `per-frame-compact` time mode
159
+ prev_space = " " if frame_idx > 0 else ""
160
+ frame_prefix = prev_space + f"{frame_time:.1f} " # explicit whitespace before/after image tokens
161
+
162
+ video_string += frame_prefix
163
+ per_row = np.full(w, IMAGE_PATCH_TOKEN)
164
+ if self.video_use_col_tokens:
165
+ per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
166
+ extra_tokens = np.tile(per_row, [h])
167
+ video_tokens = [
168
+ [start_token_id],
169
+ extra_tokens,
170
+ [end_token_id],
171
+ ]
172
+ video_string += "".join(np.concatenate(video_tokens, 0))
173
+
174
+ return video_string
175
+
176
+ def insert_bos(
177
+ self,
178
+ input_ids: np.ndarray,
179
+ attention_mask: np.ndarray,
180
+ bos_token_id: int,
181
+ pad_token_id: int,
182
+ ):
183
+ """
184
+ Args:
185
+ input_ids: [B, S] array with left padding
186
+ attention_mask: [B, S] array (0 for pad, 1 for valid)
187
+ bos_token_id: int
188
+ pad_token_id: int
189
+ Returns:
190
+ input_ids_out: [B, S] or [B, S+1] array with bos inserted if needed
191
+ attention_mask_out: same shape as input_ids_out
192
+ """
193
+
194
+ need_to_expand = len(input_ids.shape) == 1
195
+ if need_to_expand:
196
+ input_ids = input_ids[None, :]
197
+ attention_mask = attention_mask[None, :]
198
+
199
+ B, S = input_ids.shape
200
+
201
+ # Handle zero-length sequence
202
+ if S == 0:
203
+ new_input_ids = np.full((B, 1), bos_token_id, dtype=input_ids.dtype)
204
+ new_attention_mask = np.ones((B, 1), dtype=attention_mask.dtype)
205
+ if need_to_expand:
206
+ new_input_ids = new_input_ids[0]
207
+ new_attention_mask = new_attention_mask[0]
208
+ return new_input_ids, new_attention_mask
209
+
210
+ first_valid_index = (attention_mask == 1).argmax(axis=-1) # [B]
211
+ bos_already_present = np.all(input_ids[np.arange(B), first_valid_index] == bos_token_id)
212
+
213
+ if bos_already_present:
214
+ if need_to_expand:
215
+ input_ids = input_ids[0]
216
+ attention_mask = attention_mask[0]
217
+ return input_ids, attention_mask
218
+ else:
219
+ new_input_ids = np.full((B, S+1), pad_token_id, dtype=input_ids.dtype)
220
+ new_attention_mask = np.zeros((B, S+1), dtype=attention_mask.dtype)
221
+
222
+ src_idx = np.tile(np.arange(S), (B, 1)) # [B, S]
223
+ valid_mask = src_idx >= first_valid_index[:, None] # [B, S]
224
+ tgt_idx = src_idx + 1 # shit right
225
+ batch_idx = np.tile(np.arange(B)[:, None], (1, S)) # [B, S]
226
+
227
+ # flatten valid_positions
228
+ flat_vals = input_ids[valid_mask]
229
+ flat_batch = batch_idx[valid_mask]
230
+ flat_tgt = tgt_idx[valid_mask]
231
+
232
+ new_input_ids[flat_batch, flat_tgt] = flat_vals
233
+ new_attention_mask[flat_batch, flat_tgt] = 1
234
+
235
+ insert_pos = first_valid_index
236
+ new_input_ids[np.arange(B), insert_pos] = bos_token_id
237
+ new_attention_mask[np.arange(B), insert_pos] = 1
238
+
239
+ if need_to_expand:
240
+ new_input_ids = new_input_ids[0]
241
+ new_attention_mask = new_attention_mask[0]
242
+
243
+ return new_input_ids, new_attention_mask
244
+
245
+ def __call__(
246
+ self,
247
+ text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
248
+ images: ImageInput = None,
249
+ videos: VideoInput = None,
250
+ return_subpatch_mapping: bool = False,
251
+ **kwargs: Unpack[MolmoPointProcessorKwargs],
252
+ ) -> BatchFeature:
253
+ """
254
+
255
+ Args:
256
+ text (`str`, `list[str]`, `list[list[str]]`):
257
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
258
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
259
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
260
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
261
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
262
+ tensor. Both channels-first and channels-last formats are supported.
263
+ videos (`dict[str, Any]` or `list[dict[str, Any]]`):
264
+ The video or batch of videos to be prepared. Each video can be a dictionary with the following keys:
265
+ - `"frames"`: `np.ndarray` of shape (T, H, W, 3)
266
+ - `"timestamps"`: `np.ndarray` of shape (T,)
267
+ - `"sampled_fps"`: `float` (optional)
268
+ - `"sampling_augmentation"`: `str` (optional)
269
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
270
+ If set, will return tensors of a particular framework. Acceptable values are:
271
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
272
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
273
+ - `'np'`: Return NumPy `np.ndarray` objects.
274
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
275
+
276
+ Returns:
277
+ `BatchFeature`: A [`BatchFeature`] with the following fields:
278
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
279
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
280
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`).
281
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
282
+ - **image_token_pooling** -- Indices of the patches in `image_grids` to pool for each token in `image_tokens`.
283
+ Returned when `images` is not `None`.
284
+ - **image_grids** -- Grids of images. Returned when `images` is not `None`.
285
+ - **image_num_crops** -- Number of crops for each image. Returned when `images` is not `None`.
286
+ - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
287
+ - **video_token_pooling** -- Indices of the patches in `video_grids` to pool for each token in `video_tokens`.
288
+ Returned when `videos` is not `None`.
289
+ - **video_grids** -- Grids of videos. Returned when `videos` is not `None`.
290
+ """
291
+
292
+ output_kwargs = self._merge_kwargs(
293
+ MolmoPointProcessorKwargs,
294
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
295
+ **kwargs,
296
+ )
297
+
298
+ subpatch_mapping = None
299
+ if images is not None:
300
+ if return_subpatch_mapping:
301
+ image_inputs, subpatch_mapping = self.image_processor(images, **output_kwargs["images_kwargs"], return_subpatch_mapping=return_subpatch_mapping)
302
+ else:
303
+ image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
304
+ image_grids = image_inputs["image_grids"]
305
+ else:
306
+ image_inputs = {}
307
+ image_grids = None
308
+
309
+ if videos is not None:
310
+ videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
311
+ video_grids = videos_inputs["video_grids"]
312
+ # If user has not requested video metadata, pop it
313
+ if "return_metadata" not in kwargs:
314
+ video_metadata = videos_inputs.pop("video_metadata")
315
+ else:
316
+ video_metadata = videos_inputs["video_metadata"]
317
+ else:
318
+ videos_inputs = {}
319
+ video_grids = None
320
+
321
+ if not isinstance(text, list):
322
+ text = [text]
323
+
324
+ text = text.copy() # below lines change text in-place
325
+
326
+ if image_grids is not None:
327
+ index = 0
328
+ for i in range(len(text)):
329
+ num_images = text[i].count(self.image_placeholder_token)
330
+ image_grids_i = image_grids[index:index+num_images]
331
+ for image_grid in image_grids_i:
332
+ image_tokens = self.get_image_tokens(image_grid)
333
+ image_string = "".join(image_tokens)
334
+ text[i] = text[i].replace(self.image_placeholder_token, image_string, 1)
335
+ index += num_images
336
+
337
+ if video_grids is not None:
338
+ index = 0
339
+ for i in range(len(text)):
340
+ num_videos = text[i].count(self.video_placeholder_token)
341
+ assert num_videos in {0, 1}, "At most one video is supported for now"
342
+ video_grids_i = video_grids[index:index+num_videos]
343
+ metadata_i = video_metadata[index:index+num_videos]
344
+ for video_grid, metadata in zip(video_grids_i, metadata_i):
345
+ video_string = self.get_video_string(
346
+ video_grid,
347
+ metadata.timestamps,
348
+ )
349
+ text[i] = text[i].replace(self.video_placeholder_token, video_string, 1)
350
+ index += num_videos
351
+
352
+ return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
353
+ return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
354
+ text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
355
+
356
+ input_ids = text_inputs["input_ids"]
357
+ attention_mask = text_inputs["attention_mask"]
358
+
359
+ input_ids = np.array(input_ids)
360
+ attention_mask = np.array(attention_mask)
361
+
362
+ bos = self.tokenizer.bos_token_id or self.tokenizer.eos_token_id
363
+ input_ids, attention_mask = self.insert_bos(
364
+ input_ids, attention_mask, bos, self.tokenizer.pad_token_id
365
+ )
366
+
367
+ if return_mm_token_type_ids:
368
+ image_tokens = np.array(self.image_token_ids).astype(input_ids.dtype)
369
+ token_type_ids = np.any(input_ids[:, :, None] == image_tokens[None, None, :], axis=-1)
370
+ text_inputs["token_type_ids"] = token_type_ids.tolist()
371
+
372
+ text_inputs["input_ids"] = input_ids.tolist()
373
+ text_inputs["attention_mask"] = attention_mask.tolist()
374
+ features = BatchFeature(
375
+ data={**text_inputs, **image_inputs, **videos_inputs},
376
+ tensor_type=return_tensors,
377
+ )
378
+ if return_subpatch_mapping:
379
+ return features, subpatch_mapping
380
+ return features
381
+
382
+ def post_process_image_text_to_text(
383
+ self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
384
+ ):
385
+ """
386
+ Post-process the output of the model to decode the text.
387
+
388
+ Args:
389
+ generated_outputs (`torch.Tensor` or `np.ndarray`):
390
+ The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
391
+ or `(sequence_length,)`.
392
+ skip_special_tokens (`bool`, *optional*, defaults to `True`):
393
+ Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
394
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
395
+ Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
396
+ **kwargs:
397
+ Additional arguments to be passed to the tokenizer's `batch_decode` method.
398
+
399
+ Returns:
400
+ `list[str]`: The decoded text.
401
+ """
402
+ return self.tokenizer.batch_decode(
403
+ generated_outputs,
404
+ skip_special_tokens=skip_special_tokens,
405
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
406
+ **kwargs,
407
+ )
408
+
409
+
410
+ MolmoPointProcessor.register_for_auto_class()
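Since the processor registers itself for auto-class loading (and `processor_config.json` below maps `AutoProcessor` to `processing_molmo2.Molmo2Processor`), it can be loaded with `trust_remote_code=True`. A minimal usage sketch, not part of the upload: the repo id, the image path, and the assumption that `<|image|>` is the image placeholder token (it appears in `special_tokens_map.json`) are all mine.

```python
from PIL import Image
from transformers import AutoProcessor

# Hypothetical repo id; point this at wherever this upload actually lives.
processor = AutoProcessor.from_pretrained("<repo-id>", trust_remote_code=True)

# __call__ above expands each image placeholder in the text into per-crop image
# tokens; "<|image|>" is assumed to be that placeholder based on special_tokens_map.json.
text = "<|image|> Describe this image."
image = Image.open("example.jpg")

inputs = processor(text=text, images=[image], return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, plus the image crops/grids
```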
processor_config.json ADDED
@@ -0,0 +1,12 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "processing_molmo2.Molmo2Processor"
4
+ },
5
+ "image_use_col_tokens": true,
6
+ "processor_class": "Molmo2Processor",
7
+ "use_frame_special_tokens": true,
8
+ "use_low_res_token_for_global_crops": true,
9
+ "use_single_crop_col_tokens": null,
10
+ "use_single_crop_start_token": false,
11
+ "video_use_col_tokens": false
12
+ }
random_1gb.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:eb690f2ed7814896bf17cc622a90c24789f83838bb5770bbfaa7e0978287091b
3
  size 1073741824
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:178e8c9bcf3a01d6c7d2914ca00a0887f79ea3dbc0528e53443c3f7509840deb
3
  size 1073741824
special_tokens_map.json ADDED
@@ -0,0 +1,300 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "|<EXTRA_TOKENS_0>|",
4
+ "|<EXTRA_TOKENS_1>|",
5
+ "|<EXTRA_TOKENS_2>|",
6
+ "|<EXTRA_TOKENS_3>|",
7
+ "|<EXTRA_TOKENS_4>|",
8
+ "|<EXTRA_TOKENS_5>|",
9
+ "|<EXTRA_TOKENS_6>|",
10
+ "|<EXTRA_TOKENS_7>|",
11
+ "|<EXTRA_TOKENS_8>|",
12
+ "|<EXTRA_TOKENS_9>|",
13
+ "|<EXTRA_TOKENS_10>|",
14
+ "|<EXTRA_TOKENS_11>|",
15
+ "|<EXTRA_TOKENS_12>|",
16
+ "|<EXTRA_TOKENS_13>|",
17
+ "|<EXTRA_TOKENS_14>|",
18
+ "|<EXTRA_TOKENS_15>|",
19
+ "|<EXTRA_TOKENS_16>|",
20
+ "|<EXTRA_TOKENS_17>|",
21
+ "|<EXTRA_TOKENS_18>|",
22
+ "|<EXTRA_TOKENS_19>|",
23
+ "|<EXTRA_TOKENS_20>|",
24
+ "|<EXTRA_TOKENS_21>|",
25
+ "|<EXTRA_TOKENS_22>|",
26
+ "|<EXTRA_TOKENS_23>|",
27
+ "|<EXTRA_TOKENS_24>|",
28
+ "|<EXTRA_TOKENS_25>|",
29
+ "|<EXTRA_TOKENS_26>|",
30
+ "|<EXTRA_TOKENS_27>|",
31
+ "|<EXTRA_TOKENS_28>|",
32
+ "|<EXTRA_TOKENS_29>|",
33
+ "|<EXTRA_TOKENS_30>|",
34
+ "|<EXTRA_TOKENS_31>|",
35
+ "|<EXTRA_TOKENS_32>|",
36
+ "|<EXTRA_TOKENS_33>|",
37
+ "|<EXTRA_TOKENS_34>|",
38
+ "|<EXTRA_TOKENS_35>|",
39
+ "|<EXTRA_TOKENS_36>|",
40
+ "|<EXTRA_TOKENS_37>|",
41
+ "|<EXTRA_TOKENS_38>|",
42
+ "|<EXTRA_TOKENS_39>|",
43
+ "|<EXTRA_TOKENS_40>|",
44
+ "|<EXTRA_TOKENS_41>|",
45
+ "|<EXTRA_TOKENS_42>|",
46
+ "|<EXTRA_TOKENS_43>|",
47
+ "|<EXTRA_TOKENS_44>|",
48
+ "|<EXTRA_TOKENS_45>|",
49
+ "|<EXTRA_TOKENS_46>|",
50
+ "|<EXTRA_TOKENS_47>|",
51
+ "|<EXTRA_TOKENS_48>|",
52
+ "|<EXTRA_TOKENS_49>|",
53
+ "|<EXTRA_TOKENS_50>|",
54
+ "|<EXTRA_TOKENS_51>|",
55
+ "|<EXTRA_TOKENS_52>|",
56
+ "|<EXTRA_TOKENS_53>|",
57
+ "|<EXTRA_TOKENS_54>|",
58
+ "|<EXTRA_TOKENS_55>|",
59
+ "|<EXTRA_TOKENS_56>|",
60
+ "|<EXTRA_TOKENS_57>|",
61
+ "|<EXTRA_TOKENS_58>|",
62
+ "|<EXTRA_TOKENS_59>|",
63
+ "|<EXTRA_TOKENS_60>|",
64
+ "|<EXTRA_TOKENS_61>|",
65
+ "|<EXTRA_TOKENS_62>|",
66
+ "|<EXTRA_TOKENS_63>|",
67
+ "|<EXTRA_TOKENS_64>|",
68
+ "|<EXTRA_TOKENS_65>|",
69
+ "|<EXTRA_TOKENS_66>|",
70
+ "|<EXTRA_TOKENS_67>|",
71
+ "|<EXTRA_TOKENS_68>|",
72
+ "|<EXTRA_TOKENS_69>|",
73
+ "|<EXTRA_TOKENS_70>|",
74
+ "|<EXTRA_TOKENS_71>|",
75
+ "|<EXTRA_TOKENS_72>|",
76
+ "|<EXTRA_TOKENS_73>|",
77
+ "|<EXTRA_TOKENS_74>|",
78
+ "|<EXTRA_TOKENS_75>|",
79
+ "|<EXTRA_TOKENS_76>|",
80
+ "|<EXTRA_TOKENS_77>|",
81
+ "|<EXTRA_TOKENS_78>|",
82
+ "|<EXTRA_TOKENS_79>|",
83
+ "|<EXTRA_TOKENS_80>|",
84
+ "|<EXTRA_TOKENS_81>|",
85
+ "|<EXTRA_TOKENS_82>|",
86
+ "|<EXTRA_TOKENS_83>|",
87
+ "|<EXTRA_TOKENS_84>|",
88
+ "|<EXTRA_TOKENS_85>|",
89
+ "|<EXTRA_TOKENS_86>|",
90
+ "|<EXTRA_TOKENS_87>|",
91
+ "|<EXTRA_TOKENS_88>|",
92
+ "|<EXTRA_TOKENS_89>|",
93
+ "|<EXTRA_TOKENS_90>|",
94
+ "|<EXTRA_TOKENS_91>|",
95
+ "|<EXTRA_TOKENS_92>|",
96
+ "|<EXTRA_TOKENS_93>|",
97
+ "|<EXTRA_TOKENS_94>|",
98
+ "|<EXTRA_TOKENS_95>|",
99
+ "|<EXTRA_TOKENS_96>|",
100
+ "|<EXTRA_TOKENS_97>|",
101
+ "|<EXTRA_TOKENS_98>|",
102
+ "|<EXTRA_TOKENS_99>|",
103
+ "|<EXTRA_TOKENS_100>|",
104
+ "|<EXTRA_TOKENS_101>|",
105
+ "|<EXTRA_TOKENS_102>|",
106
+ "|<EXTRA_TOKENS_103>|",
107
+ "|<EXTRA_TOKENS_104>|",
108
+ "|<EXTRA_TOKENS_105>|",
109
+ "|<EXTRA_TOKENS_106>|",
110
+ "|<EXTRA_TOKENS_107>|",
111
+ "|<EXTRA_TOKENS_108>|",
112
+ "|<EXTRA_TOKENS_109>|",
113
+ "|<EXTRA_TOKENS_110>|",
114
+ "|<EXTRA_TOKENS_111>|",
115
+ "|<EXTRA_TOKENS_112>|",
116
+ "|<EXTRA_TOKENS_113>|",
117
+ "|<EXTRA_TOKENS_114>|",
118
+ "|<EXTRA_TOKENS_115>|",
119
+ "|<EXTRA_TOKENS_116>|",
120
+ "|<EXTRA_TOKENS_117>|",
121
+ "|<EXTRA_TOKENS_118>|",
122
+ "|<EXTRA_TOKENS_119>|",
123
+ "|<EXTRA_TOKENS_120>|",
124
+ "|<EXTRA_TOKENS_121>|",
125
+ "|<EXTRA_TOKENS_122>|",
126
+ "|<EXTRA_TOKENS_123>|",
127
+ "|<EXTRA_TOKENS_124>|",
128
+ "|<EXTRA_TOKENS_125>|",
129
+ "|<EXTRA_TOKENS_126>|",
130
+ "|<EXTRA_TOKENS_127>|",
131
+ "|<EXTRA_TOKENS_128>|",
132
+ "|<EXTRA_TOKENS_129>|",
133
+ "|<EXTRA_TOKENS_130>|",
134
+ "|<EXTRA_TOKENS_131>|",
135
+ "|<EXTRA_TOKENS_132>|",
136
+ "|<EXTRA_TOKENS_133>|",
137
+ "|<EXTRA_TOKENS_134>|",
138
+ "|<EXTRA_TOKENS_135>|",
139
+ "|<EXTRA_TOKENS_136>|",
140
+ "|<EXTRA_TOKENS_137>|",
141
+ "|<EXTRA_TOKENS_138>|",
142
+ "|<EXTRA_TOKENS_139>|",
143
+ "|<EXTRA_TOKENS_140>|",
144
+ "|<EXTRA_TOKENS_141>|",
145
+ "|<EXTRA_TOKENS_142>|",
146
+ "|<EXTRA_TOKENS_143>|",
147
+ "|<EXTRA_TOKENS_144>|",
148
+ "|<EXTRA_TOKENS_145>|",
149
+ "|<EXTRA_TOKENS_146>|",
150
+ "|<EXTRA_TOKENS_147>|",
151
+ "|<EXTRA_TOKENS_148>|",
152
+ "|<EXTRA_TOKENS_149>|",
153
+ "|<EXTRA_TOKENS_150>|",
154
+ "|<EXTRA_TOKENS_151>|",
155
+ "|<EXTRA_TOKENS_152>|",
156
+ "|<EXTRA_TOKENS_153>|",
157
+ "|<EXTRA_TOKENS_154>|",
158
+ "|<EXTRA_TOKENS_155>|",
159
+ "|<EXTRA_TOKENS_156>|",
160
+ "|<EXTRA_TOKENS_157>|",
161
+ "|<EXTRA_TOKENS_158>|",
162
+ "|<EXTRA_TOKENS_159>|",
163
+ "|<EXTRA_TOKENS_160>|",
164
+ "|<EXTRA_TOKENS_161>|",
165
+ "|<EXTRA_TOKENS_162>|",
166
+ "|<EXTRA_TOKENS_163>|",
167
+ "|<EXTRA_TOKENS_164>|",
168
+ "|<EXTRA_TOKENS_165>|",
169
+ "|<EXTRA_TOKENS_166>|",
170
+ "|<EXTRA_TOKENS_167>|",
171
+ "|<EXTRA_TOKENS_168>|",
172
+ "|<EXTRA_TOKENS_169>|",
173
+ "|<EXTRA_TOKENS_170>|",
174
+ "|<EXTRA_TOKENS_171>|",
175
+ "|<EXTRA_TOKENS_172>|",
176
+ "|<EXTRA_TOKENS_173>|",
177
+ "|<EXTRA_TOKENS_174>|",
178
+ "|<EXTRA_TOKENS_175>|",
179
+ "|<EXTRA_TOKENS_176>|",
180
+ "|<EXTRA_TOKENS_177>|",
181
+ "|<EXTRA_TOKENS_178>|",
182
+ "|<EXTRA_TOKENS_179>|",
183
+ "|<EXTRA_TOKENS_180>|",
184
+ "|<EXTRA_TOKENS_181>|",
185
+ "|<EXTRA_TOKENS_182>|",
186
+ "|<EXTRA_TOKENS_183>|",
187
+ "|<EXTRA_TOKENS_184>|",
188
+ "|<EXTRA_TOKENS_185>|",
189
+ "|<EXTRA_TOKENS_186>|",
190
+ "|<EXTRA_TOKENS_187>|",
191
+ "|<EXTRA_TOKENS_188>|",
192
+ "|<EXTRA_TOKENS_189>|",
193
+ "|<EXTRA_TOKENS_190>|",
194
+ "|<EXTRA_TOKENS_191>|",
195
+ "|<EXTRA_TOKENS_192>|",
196
+ "|<EXTRA_TOKENS_193>|",
197
+ "|<EXTRA_TOKENS_194>|",
198
+ "|<EXTRA_TOKENS_195>|",
199
+ "|<EXTRA_TOKENS_196>|",
200
+ "|<EXTRA_TOKENS_197>|",
201
+ "|<EXTRA_TOKENS_198>|",
202
+ "|<EXTRA_TOKENS_199>|",
203
+ "|<EXTRA_TOKENS_200>|",
204
+ "|<EXTRA_TOKENS_201>|",
205
+ "|<EXTRA_TOKENS_202>|",
206
+ "|<EXTRA_TOKENS_203>|",
207
+ "|<EXTRA_TOKENS_204>|",
208
+ "|<EXTRA_TOKENS_205>|",
209
+ "|<EXTRA_TOKENS_206>|",
210
+ "|<EXTRA_TOKENS_207>|",
211
+ "|<EXTRA_TOKENS_208>|",
212
+ "|<EXTRA_TOKENS_209>|",
213
+ "|<EXTRA_TOKENS_210>|",
214
+ "|<EXTRA_TOKENS_211>|",
215
+ "|<EXTRA_TOKENS_212>|",
216
+ "|<EXTRA_TOKENS_213>|",
217
+ "|<EXTRA_TOKENS_214>|",
218
+ "|<EXTRA_TOKENS_215>|",
219
+ "|<EXTRA_TOKENS_216>|",
220
+ "|<EXTRA_TOKENS_217>|",
221
+ "|<EXTRA_TOKENS_218>|",
222
+ "|<EXTRA_TOKENS_219>|",
223
+ "|<EXTRA_TOKENS_220>|",
224
+ "|<EXTRA_TOKENS_221>|",
225
+ "|<EXTRA_TOKENS_222>|",
226
+ "|<EXTRA_TOKENS_223>|",
227
+ "|<EXTRA_TOKENS_224>|",
228
+ "|<EXTRA_TOKENS_225>|",
229
+ "|<EXTRA_TOKENS_226>|",
230
+ "|<EXTRA_TOKENS_227>|",
231
+ "|<EXTRA_TOKENS_228>|",
232
+ "|<EXTRA_TOKENS_229>|",
233
+ "|<EXTRA_TOKENS_230>|",
234
+ "|<EXTRA_TOKENS_231>|",
235
+ "|<EXTRA_TOKENS_232>|",
236
+ "|<EXTRA_TOKENS_233>|",
237
+ "|<EXTRA_TOKENS_234>|",
238
+ "|<EXTRA_TOKENS_235>|",
239
+ "|<EXTRA_TOKENS_236>|",
240
+ "|<EXTRA_TOKENS_237>|",
241
+ "|<EXTRA_TOKENS_238>|",
242
+ "|<EXTRA_TOKENS_239>|",
243
+ "|<EXTRA_TOKENS_240>|",
244
+ "|<EXTRA_TOKENS_241>|",
245
+ "|<EXTRA_TOKENS_242>|",
246
+ "|<EXTRA_TOKENS_243>|",
247
+ "|<EXTRA_TOKENS_244>|",
248
+ "|<EXTRA_TOKENS_245>|",
249
+ "|<EXTRA_TOKENS_246>|",
250
+ "|<EXTRA_TOKENS_247>|",
251
+ "|<EXTRA_TOKENS_248>|",
252
+ "|<EXTRA_TOKENS_249>|",
253
+ "|<EXTRA_TOKENS_250>|",
254
+ "|<EXTRA_TOKENS_251>|",
255
+ "|<EXTRA_TOKENS_252>|",
256
+ "|<EXTRA_TOKENS_253>|",
257
+ "|<EXTRA_TOKENS_254>|",
258
+ "|<EXTRA_TOKENS_255>|",
259
+ "|<EXTRA_TOKENS_256>|",
260
+ "|<EXTRA_TOKENS_257>|",
261
+ "|<EXTRA_TOKENS_258>|",
262
+ "|<EXTRA_TOKENS_259>|",
263
+ "|<EXTRA_TOKENS_260>|",
264
+ "|<EXTRA_TOKENS_261>|",
265
+ "|<EXTRA_TOKENS_262>|",
266
+ "|<EXTRA_TOKENS_263>|",
267
+ "|<EXTRA_TOKENS_264>|",
268
+ "|<EXTRA_TOKENS_265>|",
269
+ "|<EXTRA_TOKENS_266>|",
270
+ "<im_start>",
271
+ "<im_end>",
272
+ "<im_patch>",
273
+ "<im_col>",
274
+ "<low_res_im_start>",
275
+ "<|image|>",
276
+ "<im_low>",
277
+ "<frame_start>",
278
+ "<frame_end>",
279
+ "<|video|>",
280
+ "<|points|>",
281
+ "<|token_index|>",
282
+ "<|vit_index|>",
283
+ "<|vit_loc|>"
284
+ ],
285
+ "bos_token": "<|im_end|>",
286
+ "eos_token": {
287
+ "content": "<|im_end|>",
288
+ "lstrip": false,
289
+ "normalized": false,
290
+ "rstrip": false,
291
+ "single_word": false
292
+ },
293
+ "pad_token": {
294
+ "content": "<|endoftext|>",
295
+ "lstrip": false,
296
+ "normalized": false,
297
+ "rstrip": false,
298
+ "single_word": false
299
+ }
300
+ }
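The map above registers 267 `EXTRA_TOKENS` placeholders plus the image/video structural tokens that the processor splices into prompts. A quick, hedged sanity check that the tokenizer resolves them to single ids (the repo id is a placeholder; the token strings are taken from the file above):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("<repo-id>", trust_remote_code=True)

# Each of these must map to exactly one id; if the BPE split them, the image/video
# token expansion in processing_molmo2.py would produce malformed prompts.
for t in ["<im_start>", "<im_end>", "<im_patch>", "<im_col>", "<|image|>", "<|video|>"]:
    print(t, tok.convert_tokens_to_ids(t))
```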
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f437033cf8ca3315943460f7b7681d01130795107d9a99dc124fd9d6898e932
3
+ size 17417468
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
video_preprocessor_config.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "processing_molmo2.Molmo2Processor",
4
+ "AutoVideoProcessor": "video_processing_molmo2.Molmo2VideoProcessor"
5
+ },
6
+ "crop_size": null,
7
+ "data_format": "channels_first",
8
+ "default_to_square": true,
9
+ "device": null,
10
+ "do_center_crop": null,
11
+ "do_convert_rgb": true,
12
+ "do_normalize": true,
13
+ "do_rescale": true,
14
+ "do_resize": true,
15
+ "do_sample_frames": true,
16
+ "fps": null,
17
+ "frame_sample_mode": "uniform_last_frame",
18
+ "image_mean": [
19
+ 0.5,
20
+ 0.5,
21
+ 0.5
22
+ ],
23
+ "image_std": [
24
+ 0.5,
25
+ 0.5,
26
+ 0.5
27
+ ],
28
+ "input_data_format": null,
29
+ "max_fps": 2.0,
30
+ "num_frames": 384,
31
+ "pad_size": null,
32
+ "patch_size": 14,
33
+ "pooling_size": [
34
+ 3,
35
+ 3
36
+ ],
37
+ "processor_class": "Molmo2Processor",
38
+ "resample": 2,
39
+ "rescale_factor": 0.00392156862745098,
40
+ "return_metadata": false,
41
+ "sampling_fps": 2,
42
+ "size": {
43
+ "height": 378,
44
+ "width": 378
45
+ },
46
+ "video_metadata": null,
47
+ "video_processor_type": "Molmo2VideoProcessor"
48
+ }
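These values pin down the per-frame token geometry that `video_processing_molmo2.py` (added below) implements: each 378x378 frame is split into 14-pixel patches and the patch grid is then pooled 3x3. A back-of-the-envelope sketch using only the numbers from this config (the derived counts are mine; the actual prompt length also depends on the frame/column separator tokens configured in `processor_config.json`):

```python
# Numbers copied from video_preprocessor_config.json
size = 378               # "size": {"height": 378, "width": 378}
patch_size = 14          # "patch_size": 14
pool_h = pool_w = 3      # "pooling_size": [3, 3]
max_frames = 384         # "num_frames": 384

patches_per_side = size // patch_size                  # 378 / 14 = 27
pooled_per_side = -(-patches_per_side // pool_h)       # ceil(27 / 3) = 9
pooled_per_frame = pooled_per_side * pooled_per_side   # 81 pooled positions per frame

print(patches_per_side, pooled_per_frame)   # 27 81
print(max_frames * pooled_per_frame)        # 31104 pooled positions for a maximal-length video
```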
video_processing_molmo2.py ADDED
@@ -0,0 +1,976 @@
1
+ """Video processor class for Molmo2"""
2
+ from functools import partial
3
+ import os
4
+ import warnings
5
+ from contextlib import redirect_stdout
6
+ from io import BytesIO
7
+ from urllib.parse import urlparse
8
+ from typing import Optional, Union, Callable
9
+
10
+ import numpy as np
11
+ import requests
12
+ import einops
13
+ import torch
14
+ import torchvision.transforms
15
+
16
+ from transformers.image_utils import (
17
+ IMAGENET_STANDARD_MEAN,
18
+ IMAGENET_STANDARD_STD,
19
+ ImageInput,
20
+ PILImageResampling,
21
+ SizeDict,
22
+ validate_kwargs,
23
+ )
24
+ from transformers.video_utils import (
25
+ VideoInput,
26
+ is_valid_video,
27
+ make_batched_videos,
28
+ make_batched_metadata,
29
+ VideoMetadata,
30
+ )
31
+ from transformers.processing_utils import Unpack, VideosKwargs
32
+ from transformers.video_processing_utils import BaseVideoProcessor
33
+ from transformers.utils import logging
34
+ from transformers.feature_extraction_utils import BatchFeature
35
+ from transformers.utils import (
36
+ is_av_available,
37
+ is_decord_available,
38
+ is_torchcodec_available,
39
+ is_yt_dlp_available,
40
+ TensorType,
41
+ logging,
42
+ to_numpy,
43
+ )
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ MAX_VIDEO_FPS = 8
49
+
50
+
51
+ def normalize_image(
52
+ image: np.ndarray,
53
+ image_mean: list[float],
54
+ image_std: list[float],
55
+ ) -> np.ndarray:
56
+ image -= np.array(image_mean, dtype=np.float32)[None, None, :]
57
+ image /= np.array(image_std, dtype=np.float32)[None, None, :]
58
+ return image
59
+
60
+
61
+ def resize_image(
62
+ image: np.ndarray,
63
+ desired_output_size: list[int],
64
+ resample: PILImageResampling,
65
+ ) -> np.ndarray:
66
+ if len(image.shape) == 3:
67
+ is_video = False
68
+ image = torch.permute(torch.from_numpy(image), [2, 0, 1])
69
+ else:
70
+ is_video = True
71
+ image = torch.permute(torch.from_numpy(image), [0, 3, 1, 2])
72
+ dtype = image.dtype
73
+ if torch.is_floating_point(image):
74
+ in_min = 0.0
75
+ in_max = 1.0
76
+ resized = torchvision.transforms.Resize(
77
+ desired_output_size,
78
+ resample,
79
+ antialias=False,
80
+ )(image)
81
+ resized = torch.clip(resized, 0.0, 1.0).to(dtype)
82
+ else:
83
+ assert image.dtype == torch.uint8, "SigLIP expects float images or uint8 images, but got {}".format(image.dtype)
84
+ in_min = 0.0
85
+ in_max = 255.0
86
+ resized = torchvision.transforms.Resize(
87
+ desired_output_size,
88
+ resample,
89
+ antialias=False,
90
+ )(image)
91
+ resized = torch.clip(resized, 0, 255).to(dtype)
92
+
93
+ resized = resized.to(torch.float32)
94
+ resized = (resized - in_min) / (in_max - in_min)
95
+
96
+ if is_video:
97
+ resized = torch.permute(resized, [0, 2, 3, 1]).numpy()
98
+ else:
99
+ resized = torch.permute(resized, [1, 2, 0]).numpy()
100
+
101
+ return resized
102
+
103
+
104
+ def build_resized_image(
105
+ image: np.ndarray,
106
+ base_image_input_size: list[int],
107
+ resample: PILImageResampling,
108
+ image_mean: list[float],
109
+ image_std: list[float],
110
+ image_patch_size: int,
111
+ ) -> tuple[np.ndarray, np.ndarray]:
112
+ resized = resize_image(
113
+ image, base_image_input_size, resample,
114
+ )
115
+ resized = normalize_image(resized, image_mean, image_std)
116
+ if len(resized.shape) == 3:
117
+ resized = np.expand_dims(resized, 0)
118
+ crop_patch_w = base_image_input_size[1] // image_patch_size
119
+ crop_patch_h = base_image_input_size[0] // image_patch_size
120
+ resize_idx = np.arange(crop_patch_w*crop_patch_h).reshape([crop_patch_h, crop_patch_w])
121
+ return resized, resize_idx
122
+
123
+
124
+ def batch_pixels_to_patches(array: np.ndarray, patch_size: int) -> np.ndarray:
125
+ """Reshape images of [n_images, h, w, 3] -> [n_images, n_patches, pixels_per_patch]"""
126
+ if len(array.shape) == 3:
127
+ n_crops, h, w = array.shape
128
+ h_patches = h//patch_size
129
+ w_patches = w//patch_size
130
+ array = np.reshape(array, [n_crops, h_patches, patch_size, w_patches, patch_size])
131
+ array = np.transpose(array, [0, 1, 3, 2, 4])
132
+ array = np.reshape(array, [n_crops, h_patches*w_patches, patch_size*patch_size])
133
+ return array
134
+ else:
135
+ n_crops, h, w, c = array.shape
136
+ h_patches = h//patch_size
137
+ w_patches = w//patch_size
138
+ array = np.reshape(array, [n_crops, h_patches, patch_size, w_patches, patch_size, c])
139
+ array = np.transpose(array, [0, 1, 3, 2, 4, 5])
140
+ array = np.reshape(array, [n_crops, h_patches*w_patches, patch_size*patch_size*c])
141
+ return array
142
+
143
+
144
+ def arange_for_pooling(
145
+ idx_arr: np.ndarray,
146
+ pool_h: int,
147
+ pool_w: int,
148
+ ) -> np.ndarray:
149
+ h_pad = pool_h * ((idx_arr.shape[0] + pool_h - 1) // pool_h) - idx_arr.shape[0]
150
+ w_pad = pool_w * ((idx_arr.shape[1] + pool_w - 1) // pool_w) - idx_arr.shape[1]
151
+ idx_arr = np.pad(idx_arr, [[h_pad//2, (h_pad+1)//2], [w_pad//2, (w_pad+1)//2]],
152
+ mode='constant',constant_values=-1)
153
+ return einops.rearrange(
154
+ idx_arr, "(h dh) (w dw) -> h w (dh dw)", dh=pool_h, dw=pool_w)
155
+
156
+
157
+ def image_to_patches_and_grids(
158
+ image: ImageInput,
159
+ base_image_input_size: list[int],
160
+ resample: PILImageResampling,
161
+ image_mean: list[float],
162
+ image_std: list[float],
163
+ image_patch_size: int,
164
+ image_pooling_w: int,
165
+ image_pooling_h: int,
166
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
167
+ """
168
+ :return image_grids, the shape of each image after pooling
169
+ :return crops, the image crops to process with the ViT
170
+ :return pooled_patch_idx, for each patch_id token in `image_tokens`, the indices of the
171
+ patches in `crops` to pool for that token, masked with -1
172
+ """
173
+ if isinstance(base_image_input_size, int):
174
+ base_image_input_size = (base_image_input_size, base_image_input_size)
175
+
176
+ pooling_w = image_pooling_w
177
+ pooling_h = image_pooling_h
178
+
179
+ resized, resize_idx = build_resized_image(
180
+ image,
181
+ base_image_input_size,
182
+ resample,
183
+ image_mean,
184
+ image_std,
185
+ image_patch_size,
186
+ )
187
+ pooling_idx = arange_for_pooling(resize_idx, pooling_h, pooling_w)
188
+ h, w = pooling_idx.shape[:2]
189
+ pooling_idx = pooling_idx.reshape([-1, pooling_h*pooling_w])
190
+ image_grid = [h, w]
191
+ return (
192
+ image_grid,
193
+ batch_pixels_to_patches(resized, image_patch_size),
194
+ pooling_idx,
195
+ )
196
+
197
+
198
+ def get_candidate_target_fps(
199
+ video_fps: Union[int, float],
200
+ sampling_fps: Union[int, float],
201
+ max_fps: Union[int, float] = MAX_VIDEO_FPS,
202
+ ) -> list[float]:
203
+ """
204
+ Return the subset of `video_fps` factors that remain multiples of `sampling_fps`.
205
+
206
+ Examples:
207
+ >>> get_candidate_target_fps(video_fps=6, sampling_fps=2)
208
+ [2, 6]
209
+ >>> get_candidate_target_fps(video_fps=5, sampling_fps=1)
210
+ [1, 5]
211
+ >>> get_candidate_target_fps(video_fps=2, sampling_fps=2)
212
+ [2]
213
+ >>> get_candidate_target_fps(video_fps=5, sampling_fps=2)
214
+ Traceback (most recent call last):
215
+ ...
216
+ ValueError: sampling_fps=2 must divide video_fps=5.
217
+ """
218
+ if sampling_fps is None:
219
+ raise ValueError("sampling_fps must be provided")
220
+
221
+ video_fps = int(video_fps)
222
+ sampling_fps = int(sampling_fps)
223
+ max_fps = int(max_fps)
224
+ if video_fps <= 0 or sampling_fps <= 0:
225
+ raise ValueError(f"video_fps and sampling_fps must be positive (got {video_fps}, {sampling_fps})")
226
+ if video_fps % sampling_fps != 0:
227
+ raise ValueError(f"sampling_fps={sampling_fps} must divide video_fps={video_fps}.")
228
+
229
+ candidates = []
230
+ for candidate in range(sampling_fps, video_fps + 1, sampling_fps):
231
+ if candidate > max_fps:
232
+ break
233
+ if video_fps % candidate == 0:
234
+ candidates.append(float(candidate))
235
+
236
+ return candidates
237
+
238
+
239
+ def read_video_decord(
240
+ video_path,
241
+ sample_timestamps_fn: Callable,
242
+ **kwargs,
243
+ ) -> np.ndarray:
244
+ """
245
+ Decode a video using the Decord backend.
246
+
247
+ Args:
248
+ video_path (`str`):
249
+ Path to the video file.
250
+ sample_timestamps_fn (`Callable`):
251
+ A callable function that will return timestamps at which the video should be sampled.
252
+
253
+ Returns:
254
+ tuple[`np.array`, `VideoMetadata`]: A tuple containing:
255
+ - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
256
+ - `VideoMetadata` object.
257
+ """
258
+ # Lazy import from decord
259
+ import importlib
260
+ decord = importlib.import_module("decord")
261
+
262
+ vr = decord.VideoReader(uri=video_path, ctx=decord.cpu(0)) # decord has problems with gpu
263
+ video_fps = vr.get_avg_fps()
264
+ total_num_frames = len(vr)
265
+ time_stamps = vr.get_frame_timestamp(list(range(len(vr))))
266
+ duration = time_stamps[-1][1] - time_stamps[0][0]
267
+
268
+ metadata = VideoMetadata(
269
+ total_num_frames=int(total_num_frames),
270
+ fps=float(video_fps),
271
+ duration=float(duration),
272
+ video_backend="decord",
273
+ )
274
+
275
+ target_timestamps = sample_timestamps_fn(metadata=metadata, **kwargs)
276
+ target_timestamps = np.array(target_timestamps)
277
+ offset = time_stamps[0, 0]
278
+
279
+ ix = np.searchsorted(time_stamps[:, 1], target_timestamps + offset, side='right')
280
+ ix = np.minimum(ix, len(time_stamps) - 1)
281
+
282
+ video = vr.get_batch(ix).asnumpy()
283
+ metadata.update(
284
+ {
285
+ "frames_indices": target_timestamps * video_fps,
286
+ "height": video.shape[1],
287
+ "width": video.shape[2],
288
+ }
289
+ )
290
+ return video, metadata
291
+
292
+
293
+ def read_video_torchcodec(
294
+ video_path,
295
+ sample_timestamps_fn: Callable,
296
+ **kwargs,
297
+ ) -> np.ndarray:
298
+ """
299
+ Decode a video using torchcodec decoder.
300
+
301
+ Args:
302
+ video_path (`str`):
303
+ Path to the video file.
304
+ sample_timestamps_fn (`Callable`):
305
+ A callable function that will return timestamps at which the video should be sampled.
306
+
307
+ Returns:
308
+ tuple[`np.array`, `VideoMetadata`]: A tuple containing:
309
+ - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
310
+ - `VideoMetadata` object.
311
+ """
312
+ # Lazy import torchcodec
313
+ import importlib
314
+ torchcodec = importlib.import_module("torchcodec")
315
+
316
+ decoder = torchcodec.decoders.VideoDecoder(
317
+ video_path,
318
+ # Interestingly, `exact` mode takes less time than approximate mode when we load the whole video
319
+ seek_mode="exact",
320
+ # Let FFmpeg decide on the number of threads for efficiency
321
+ num_ffmpeg_threads=0,
322
+ )
323
+ # If the first frame starts at > 0, we effectively clip the video starting at that time
324
+ # since (most) video players would also skip to that time
325
+ time_offset = decoder.metadata.begin_stream_seconds_from_content
326
+ # Note this duration does assume we started playing at `time_offset`
327
+ duration = decoder.metadata.duration_seconds
328
+
329
+ metadata = VideoMetadata(
330
+ total_num_frames=decoder.metadata.num_frames,
331
+ fps=decoder.metadata.average_fps,
332
+ duration=duration,
333
+ video_backend="torchcodec",
334
+ height=decoder.metadata.height,
335
+ width=decoder.metadata.width,
336
+ )
337
+
338
+ target_timestamps = sample_timestamps_fn(metadata=metadata, **kwargs)
339
+
340
+ # Floating point/rounding issues might cause `target_timestamps` to be very slightly
341
+ # out-of-bounds, to handle this we sanity check then clip them
342
+ assert all(x >= 0 for x in target_timestamps)
343
+ assert all(x < duration+1e-6 for x in target_timestamps)
344
+ # 1e-6 padding since torchcodec can throw out-of-bounds errors even if you ask for the
345
+ # exact boundary value, we should still get the first/last frame anyway
346
+ max_timestamp = decoder.metadata.end_stream_seconds_from_content - 1e-6
347
+ min_timestamp = decoder.metadata.begin_stream_seconds_from_content + 1e-6
348
+ # Note we avoid using numpy ops here to reduce floating precision issues
349
+ timestamps = [x + time_offset for x in target_timestamps]
350
+ timestamps = [max(min_timestamp, min(max_timestamp, x)) for x in timestamps]
351
+
352
+ video = decoder.get_frames_played_at(timestamps).data.numpy().transpose(0, 2, 3, 1) # Convert to THWC format
353
+ target_timestamps = np.array(target_timestamps)
354
+ metadata.frames_indices = target_timestamps * metadata.fps
355
+
356
+ return video, metadata
357
+
358
+
359
+ def read_video_pyav(
360
+ video_path,
361
+ sample_timestamps_fn: Callable,
362
+ **kwargs,
363
+ ) -> np.ndarray:
364
+ """
365
+ Decode a video using the PyAV backend.
366
+
367
+ Args:
368
+ video_path (`str`):
369
+ Path to the video file.
370
+ sample_timestamps_fn (`Callable`):
371
+ A callable function that will return timestamps at which the video should be sampled.
372
+
373
+ Returns:
374
+ tuple[`np.array`, `VideoMetadata`]: A tuple containing:
375
+ - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
376
+ - `VideoMetadata` object.
377
+ """
378
+ # Lazy import PyAV
379
+ import importlib
380
+ av = importlib.import_module("av")
381
+
382
+ with av.open(video_path) as container:
383
+ video_stream = container.streams.video[0]
384
+ fps = video_stream.average_rate or video_stream.guessed_rate
385
+ it = container.decode(video=0)
386
+ frames = list(it)
387
+
388
+ stream = container.streams.video[0]
389
+ start = frames[0].pts * stream.time_base
390
+ container_end = stream.duration
391
+ if container_end is not None:
392
+ container_end *= stream.time_base
393
+ if container_end is None or container_end < frames[-1].pts:
394
+ # Some problem with stream duration, so use the frame PTS directly
395
+ # and guess the duration of the last frame
396
+ end = frames[-1].pts * stream.time_base + 1/fps
397
+ else:
398
+ end = container_end
399
+ duration = float(end - start)
400
+
401
+ metadata = VideoMetadata(
402
+ total_num_frames=len(frames),
403
+ fps=float(fps),
404
+ duration=float(duration),
405
+ video_backend="pyav",
406
+ height=video_stream.height,
407
+ width=video_stream.width,
408
+ )
409
+
410
+ target_timestamps = sample_timestamps_fn(metadata=metadata, **kwargs)
411
+ offset = float(start)
412
+
413
+ target_timestamps = np.array(target_timestamps)
414
+ end_time_stamps = np.array([float(frame.pts * stream.time_base) for frame in frames[1:]] + [duration])
415
+ indices = np.searchsorted(end_time_stamps, target_timestamps + offset, side='right')
416
+ indices = np.minimum(indices, len(end_time_stamps) - 1)
417
+
418
+ video = np.stack(
419
+ [frames[i].to_ndarray(format="rgb24", channel_last=True) for i in indices],
420
+ axis=0,
421
+ )
422
+
423
+ metadata.frames_indices = target_timestamps * fps
424
+
425
+ return video, metadata
426
+
427
+
428
+ VIDEO_DECODERS = {
429
+ "decord": read_video_decord,
430
+ "torchcodec": read_video_torchcodec,
431
+ "pyav": read_video_pyav,
432
+ }
433
+
434
+
435
+ def load_video(
436
+ video: VideoInput,
437
+ backend: str = "decord",
438
+ sample_timestamps_fn: Optional[Callable] = None,
439
+ **kwargs,
440
+ ):
441
+ """
442
+ Loads `video` to a numpy array.
443
+
444
+ Args:
445
+ video (`VideoInput`):
446
+ The video to convert to the numpy array format. Can be a link to a video or a local path.
447
+ backend (`str`, *optional*, defaults to `"decord"`):
448
+ The backend to use when loading the video. Can be any of ["decord", "pyav", "torchcodec"]. Defaults to "decord".
449
+ sample_timestamps_fn (`Callable`):
450
+ A callable function that will return timestamps at which the video should be sampled.
451
+ """
452
+
453
+ # Early exit if provided an array or `PIL` frames
454
+ if not isinstance(video, str):
455
+ metadata = [None] * len(video)
456
+ return video, metadata
457
+
458
+ if urlparse(video).netloc in ["www.youtube.com", "youtube.com"]:
459
+ if not is_yt_dlp_available():
460
+ raise ImportError("To load a video from YouTube url you have to install `yt_dlp` first.")
461
+ # Lazy import from yt_dlp
462
+ import importlib
463
+ yt_dlp = importlib.import_module("yt_dlp")
464
+
465
+ buffer = BytesIO()
466
+ with redirect_stdout(buffer), yt_dlp.YoutubeDL() as f:
467
+ f.download([video])
468
+ bytes_obj = buffer.getvalue()
469
+ file_obj = BytesIO(bytes_obj)
470
+ elif video.startswith("http://") or video.startswith("https://"):
471
+ file_obj = BytesIO(requests.get(video).content)
472
+ elif os.path.isfile(video):
473
+ file_obj = video
474
+ else:
475
+ raise TypeError("Incorrect format used for video. Should be a URL linking to a video or a local path.")
476
+
477
+ # can also load with decord, but not cv2/torchvision
478
+ # both will fail in case of url links
479
+ video_is_url = video.startswith("http://") or video.startswith("https://")
480
+ if video_is_url and backend == "opencv":
481
+ raise ValueError("If you are trying to load a video from URL, you cannot use 'opencv' as backend")
482
+
483
+ if (
484
+ (not is_decord_available() and backend == "decord")
485
+ or (not is_torchcodec_available() and backend == "torchcodec")
486
+ or (not is_av_available() and backend == "pyav")
487
+ ):
488
+ raise ImportError(
489
+ f"You chose backend={backend} for loading the video but the required library is not found in your environment "
490
+ f"Make sure to install {backend} before loading the video."
491
+ )
492
+
493
+ video_decoder = VIDEO_DECODERS[backend]
494
+ video, metadata = video_decoder(file_obj, sample_timestamps_fn, **kwargs)
495
+ return video, metadata
496
+
497
+
498
+ def get_target_fps(
499
+ video_fps: float,
500
+ max_frames: int,
501
+ total_frames: int,
502
+ frame_sample_mode: str,
503
+ candidate_target_fps: tuple[float],
504
+ ) -> float:
505
+ """
506
+ Get the target fps that best spans the video and has the most frames sampled
507
+ """
508
+ num_frames_sampled = 0
509
+ selected_target_fps = None
510
+ for target_fps in candidate_target_fps:
511
+ step_size = max(int(video_fps / target_fps), 1)
512
+ num_frames_sampled_at_fps = int(total_frames / step_size)
513
+ if num_frames_sampled == 0:
514
+ if "uniform" in frame_sample_mode:
515
+ if num_frames_sampled_at_fps > max_frames:
516
+ break
517
+ selected_target_fps = target_fps
518
+ num_frames_sampled = num_frames_sampled_at_fps
519
+
520
+ else:
521
+ # the candidate sampling fps increases so frame count can't decrease
522
+ assert num_frames_sampled <= num_frames_sampled_at_fps
523
+ if num_frames_sampled_at_fps > max_frames:
524
+ # choose the sampling fps that spans the video
525
+ continue
526
+
527
+ elif num_frames_sampled_at_fps > num_frames_sampled:
528
+ # both are less than max_frames, choose the one with higher density of frames sampled
529
+ selected_target_fps = target_fps
530
+ num_frames_sampled = num_frames_sampled_at_fps
531
+ return selected_target_fps
532
+
533
+
534
+ def get_frame_times_and_chosen_fps(
535
+ selected_target_fps,
536
+ total_frames,
537
+ max_frames,
538
+ video_fps
539
+ ):
540
+ if selected_target_fps is None:
541
+ frame_indices = np.linspace(0, total_frames, max_frames, endpoint=False, dtype=int)
542
+ else:
543
+ step_size = max(int(video_fps / selected_target_fps), 1)
544
+ frame_indices = np.arange(0, total_frames, step_size)
545
+ if len(frame_indices) > max_frames:
546
+ frame_indices = frame_indices[:max_frames]
547
+ return selected_target_fps, frame_indices
548
+
549
+
550
+ class Molmo2VideoProcessorKwargs(VideosKwargs, total=False):
551
+ patch_size: Optional[int]
552
+ pooling_size: Optional[list[int]]
553
+ frame_sample_mode: Optional[str]
554
+ max_fps: Optional[int]
555
+ sampling_fps: Optional[int]
556
+
557
+
558
+ class Molmo2VideoProcessor(BaseVideoProcessor):
559
+ resample = PILImageResampling.BILINEAR
560
+ size = {"height": 378, "width": 378}
561
+ image_mean = IMAGENET_STANDARD_MEAN
562
+ image_std = IMAGENET_STANDARD_STD
563
+ do_resize = True
564
+ do_rescale = True
565
+ do_normalize = True
566
+ do_convert_rgb = True
567
+ patch_size = 14
568
+ pooling_size = [3, 3]
569
+ do_sample_frames = True
570
+ frame_sample_mode = "uniform_last_frame"
571
+ max_fps = 2
572
+ sampling_fps = 2
573
+ valid_kwargs = Molmo2VideoProcessorKwargs
574
+ model_input_names = ["pixel_values_videos", "video_token_pooling", "video_grids"]
575
+
576
+ def __init__(self, **kwargs: Unpack[Molmo2VideoProcessorKwargs]):
577
+ super().__init__(**kwargs)
578
+ if self.size is not None and (
579
+ self.size.get("height", None) is None or self.size.get("width", None) is None
580
+ ):
581
+ raise ValueError("size must contain 'height' and 'width' keys.")
582
+
583
+ def _further_process_kwargs(
584
+ self,
585
+ size: Optional[SizeDict] = None,
586
+ **kwargs,
587
+ ) -> dict:
588
+ """
589
+ Update kwargs that need further processing before being validated
590
+ Can be overridden by subclasses to customize the processing of kwargs.
591
+ """
592
+ if size is not None and ("height" not in size or "width" not in size):
593
+ raise ValueError("size must contain 'height' and 'width' keys.")
594
+
595
+ return super()._further_process_kwargs(size=size, **kwargs)
596
+
597
+ def sample_times(
598
+ self,
599
+ metadata: VideoMetadata,
600
+ frame_sample_mode: str,
601
+ num_frames: int,
602
+ max_fps: Optional[int] = None,
603
+ sampling_fps: Optional[int] = None,
604
+ **kwargs,
605
+ ) -> np.ndarray:
606
+ """
607
+ Time-based sampling used when a video path or URL is decoded (frames are selected by timestamp during decoding)
608
+ Args:
609
+ metadata (`VideoMetadata`):
610
+ Metadata of the video containing information about total duration, fps and total number of frames.
611
+ frame_sample_mode (`str`, *optional*):
612
+ Mode to sample frames. Defaults to `self.frame_sample_mode`.
613
+ num_frames (`int`, *optional*):
614
+ Maximum number of frames to sample. Defaults to `self.num_frames`.
615
+ max_fps (`int`, *optional*):
616
+ Maximum frames per second to sample.
617
+ sampling_fps (`int`, *optional*):
618
+ Sampling frames per second. Defaults to `self.sampling_fps`.
619
+ Used when `frame_sample_mode` is `"fps"`.
620
+ """
621
+ frame_sample_mode = frame_sample_mode or self.frame_sample_mode
622
+ num_frames = num_frames or self.num_frames
623
+ sampling_fps = sampling_fps or self.sampling_fps
624
+
625
+ duration = metadata.duration or metadata.total_num_frames / metadata.fps
626
+ if frame_sample_mode == "fps":
627
+ candidate_target_fps = get_candidate_target_fps(metadata.fps, sampling_fps)
628
+ # Try larger and larger FPSs until we hit one that can't span the video
629
+ target_fps = candidate_target_fps[0]
630
+ for candidate_fps in candidate_target_fps[1:]:
631
+ if num_frames / candidate_fps < duration:
632
+ break
633
+ target_fps = candidate_fps
634
+ times = np.arange(0, num_frames) / target_fps
635
+ times = times[times < duration]
636
+ return times
637
+ elif frame_sample_mode == "uniform_last_frame":
638
+ if max_fps is not None:
639
+ max_duration = (num_frames-1) / max_fps # -1 to include the last frame
640
+ if max_duration < duration:
641
+ times = np.linspace(
642
+ 0, duration, num=num_frames, endpoint=True, dtype=np.float64
643
+ )
644
+ else:
645
+ times = np.arange(0.0, stop=duration, step=1/max_fps)
646
+ times = np.concatenate([times, [duration]], axis=0)
647
+ assert len(times) <= num_frames
648
+ else:
649
+ times = np.linspace(
650
+ 0, duration, num=num_frames, endpoint=True, dtype=np.float64
651
+ )
652
+ return times
653
+ else:
654
+ raise NotImplementedError(frame_sample_mode)
655
+
656
+ def sample_frames(
657
+ self,
658
+ metadata: VideoMetadata,
659
+ frame_sample_mode: Optional[str] = None,
660
+ num_frames: Optional[int] = None,
661
+ max_fps: Optional[int] = None,
662
+ sampling_fps: Optional[int] = None,
663
+ **kwargs,
664
+ ) -> np.ndarray:
665
+ """
666
+ Frame-based sampling if an array video is passed
667
+ Args:
668
+ metadata (`VideoMetadata`):
669
+ Metadata of the video containing information about total duration, fps and total number of frames.
670
+ frame_sample_mode (`str`, *optional*):
671
+ Mode to sample frames. Defaults to `self.frame_sample_mode`.
672
+ num_frames (`int`, *optional*):
673
+ Maximum number of frames to sample. Defaults to `self.num_frames`.
674
+ max_fps (`int`, *optional*):
675
+ Maximum frames per second to sample.
676
+ sampling_fps (`int`, *optional*):
677
+ Sampling frames per second. Defaults to `self.sampling_fps`.
678
+ Used when `frame_sample_mode` is `"fps"`.
679
+ """
680
+ frame_sample_mode = frame_sample_mode or self.frame_sample_mode
681
+ num_frames = num_frames or self.num_frames
682
+ sampling_fps = sampling_fps or self.sampling_fps
683
+
684
+ total_num_frames = metadata.total_num_frames
685
+ if frame_sample_mode == "uniform_last_frame" and max_fps is not None:
686
+ duration = total_num_frames / metadata.fps
687
+ if total_num_frames <= 2:
688
+ return np.arange(total_num_frames).astype(int)
689
+ if duration > (num_frames - 1) / max_fps: # -1 to include the last frame
690
+ # uniform fallback
691
+ indices = np.linspace(
692
+ 0,
693
+ total_num_frames - 1,
694
+ num=min(num_frames, total_num_frames),
695
+ endpoint=True,
696
+ ).astype(int)
697
+ return indices
698
+ else:
699
+ float_indices = np.arange(
700
+ 0.0, stop=total_num_frames - 1, step=float(metadata.fps / max_fps),
701
+ )
702
+ if np.round(float_indices[-1]) != total_num_frames - 1:
703
+ float_indices = np.concatenate([float_indices, [total_num_frames - 1]], axis=0)
704
+ indices = np.round(float_indices).astype(int)
705
+ assert indices[-1] < total_num_frames
706
+ assert len(float_indices) <= num_frames
707
+ return indices
708
+ elif frame_sample_mode == "uniform_last_frame":
709
+ indices = np.linspace(
710
+ 0, total_num_frames - 1, num=min(num_frames, total_num_frames), endpoint=True,
711
+ ).astype(int)
712
+ return indices
713
+ elif frame_sample_mode == "fps":
714
+ candidate_target_fps = get_candidate_target_fps(metadata.fps, sampling_fps)
715
+ selected_target_fps = get_target_fps(
716
+ metadata.fps,
717
+ num_frames,
718
+ total_num_frames,
719
+ frame_sample_mode,
720
+ candidate_target_fps,
721
+ )
722
+ _, indices = get_frame_times_and_chosen_fps(
723
+ selected_target_fps,
724
+ total_num_frames,
725
+ num_frames,
726
+ metadata.fps,
727
+ )
728
+ return indices
729
+ else:
730
+ raise NotImplementedError(frame_sample_mode)
731
+
732
+ def fetch_videos(
733
+ self,
734
+ video_url_or_urls: Union[str, list[str], list[list[str]]],
735
+ sample_timestamps_fn=None
736
+ ):
737
+ """
738
+ Convert a single or a list of urls into the corresponding `np.array` objects.
739
+
740
+ If a single URL is passed, the return value will be a single object. If a list is passed, a list of objects is
741
+ returned.
742
+ """
743
+ if (
744
+ (not is_decord_available())
745
+ and (not is_torchcodec_available())
746
+ and (not is_av_available())
747
+ ):
748
+ raise ImportError(
749
+ "Molmo2VideoProcessor requires `decord`, `torchcodec`, or `av` to be installed."
750
+ )
751
+
752
+ if is_decord_available():
753
+ backend = "decord"
754
+ elif is_torchcodec_available():
755
+ warnings.warn(
756
+ "`decord` is not installed and cannot be used to decode the video by default. "
757
+ "Falling back to `torchcodec`."
758
+ )
759
+ backend = "torchcodec"
760
+ else:
761
+ warnings.warn(
762
+ "`decord` is not installed and cannot be used to decode the video by default. "
763
+ "Falling back to `PyAV`."
764
+ )
765
+ backend = "pyav"
766
+
767
+ if isinstance(video_url_or_urls, list):
768
+ return list(zip(*[self.fetch_videos(x, sample_timestamps_fn=sample_timestamps_fn) for x in video_url_or_urls]))
769
+ else:
770
+ return load_video(video_url_or_urls, backend=backend, sample_timestamps_fn=sample_timestamps_fn)
771
+
772
+ def _decode_and_sample_videos(
773
+ self,
774
+ videos: VideoInput,
775
+ video_metadata: Union[VideoMetadata, dict],
776
+ do_sample_frames: Optional[bool] = None,
777
+ sample_indices_fn: Optional[Callable] = None,
778
+ sample_timestamps_fn: Optional[Callable] = None,
779
+ ):
780
+ """
781
+ Decode input videos and sample frames if needed.
782
+ """
783
+ videos = make_batched_videos(videos)
784
+ video_metadata = make_batched_metadata(videos, video_metadata=video_metadata)
785
+
786
+ # Frame-based sampling if an array video is passed
787
+ # Otherwise, time-based sampling with decoding
788
+ if is_valid_video(videos[0]) and do_sample_frames:
789
+ assert video_metadata[0].fps is not None, "FPS must be provided for video input"
790
+ sampled_videos = []
791
+ sampled_metadata = []
792
+ for video, metadata in zip(videos, video_metadata):
793
+ indices = sample_indices_fn(metadata=metadata)
794
+ metadata.frames_indices = indices
795
+ sampled_videos.append(video[indices])
796
+ sampled_metadata.append(metadata)
797
+ videos = sampled_videos
798
+ video_metadata = sampled_metadata
799
+ elif not is_valid_video(videos[0]):
800
+ if sample_indices_fn is None:
801
+ logger.warning(
802
+ "do_sample_frames is False, but video array is not provided: "
803
+ "Will decode the video and sample frames using Molmo2's default sampling mode"
804
+ )
805
+ if isinstance(videos[0], list):
806
+ raise ValueError(
807
+ "A list of images is not supported for video input!"
808
+ )
809
+ else:
810
+ videos, video_metadata = self.fetch_videos(videos, sample_timestamps_fn=sample_timestamps_fn)
811
+
812
+ return videos, video_metadata
813
+
814
+ def _prepare_input_videos(
815
+ self,
816
+ videos: VideoInput,
817
+ **kwargs,
818
+ ) -> list[np.ndarray]:
819
+ processed_videos = [to_numpy(video) for video in videos]
820
+ return processed_videos
821
+
822
+ def preprocess(
823
+ self,
824
+ videos: VideoInput,
825
+ **kwargs: Unpack[Molmo2VideoProcessorKwargs],
826
+ ) -> BatchFeature:
827
+ validate_kwargs(
828
+ captured_kwargs=kwargs.keys(),
829
+ valid_processor_keys=list(self.valid_kwargs.__annotations__.keys()) +
830
+ ["return_tensors", "return_pointing_metadata"],
831
+ )
832
+
833
+ # Set default kwargs from self. This ensures that if a kwarg is not provided
834
+ # by the user, it gets its default value from the instance, or is set to None.
835
+ for kwarg_name in self.valid_kwargs.__annotations__:
836
+ kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None))
837
+
838
+ do_sample_frames = kwargs.pop("do_sample_frames")
839
+ video_metadata = kwargs.pop("video_metadata")
840
+
841
+ sample_indices_fn = partial(self.sample_frames, **kwargs) if do_sample_frames else None
842
+ sample_timestamps_fn = partial(self.sample_times, **kwargs)
843
+ videos, video_metadata = self._decode_and_sample_videos(
844
+ videos,
845
+ video_metadata=video_metadata,
846
+ do_sample_frames=do_sample_frames,
847
+ sample_indices_fn=sample_indices_fn,
848
+ sample_timestamps_fn=sample_timestamps_fn,
849
+ )
850
+ videos = self._prepare_input_videos(videos=videos)
851
+
852
+ kwargs = self._further_process_kwargs(**kwargs)
853
+
854
+ return_metadata = kwargs.pop("return_metadata")
855
+ preprocessed_videos = self._preprocess(videos=videos, **kwargs)
856
+ if return_metadata:
857
+ preprocessed_videos["video_metadata"] = video_metadata
858
+ return preprocessed_videos
859
+
860
+ def _preprocess(
861
+ self,
862
+ videos: list[np.ndarray],
863
+ size: Optional[SizeDict] = None,
864
+ resample: Optional[PILImageResampling] = None,
865
+ image_mean: Optional[Union[float, list[float]]] = None,
866
+ image_std: Optional[Union[float, list[float]]] = None,
867
+ do_convert_rgb: Optional[bool] = None,
868
+ patch_size: Optional[int] = None,
869
+ pooling_size: Optional[list[int]] = None,
870
+ return_tensors: Optional[Union[str, TensorType]] = None,
871
+ return_pointing_metadata: bool = False,
872
+ **kwargs,
873
+ ) -> BatchFeature:
874
+ """
875
+ Preprocess a video for the model.
876
+ Args:
877
+ videos (`VideoInput`):
878
+ Video to preprocess.
879
+ size (`SizeDict`, *optional*, defaults to `self.size`):
880
+ Size of the image after resizing.
881
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
882
+ Resampling filter to use when resizing the image. This can be one of the enum `PILImageResampling`. Only
883
+ has an effect if `do_resize` is set to `True`.
884
+ image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
885
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
886
+ image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
887
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
888
+ `True`.
889
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
890
+ Whether to convert the image to RGB.
891
+ patch_size (`int`, *optional*, defaults to `self.patch_size`):
892
+ The spatial patch size of the vision encoder.
893
+ pooling_size (`list[int]`, *optional*, defaults to `self.pooling_size`):
894
+ The pooling size of the vision adapter.
895
+ return_tensors (`str` or `TensorType`, *optional*):
896
+ The type of tensors to return. Can be one of:
897
+ - Unset: Return a list of `np.ndarray`.
898
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
899
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
900
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
901
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
902
+
903
+ Returns:
904
+ A `BatchFeature` containing the following keys:
905
+ - `pixel_values_videos`: The preprocessed videos.
906
+ - `video_token_pooling`: The indices of the patches in `crops` to pool for each token in `video_tokens`.
907
+ - `video_grids`: The video grids.
908
+ """
909
+ if size.height is None or size.width is None:
910
+ raise ValueError("size must contain 'height' and 'width' keys.")
911
+
912
+ base_image_input_size = [size.height, size.width]
913
+
914
+ resample = resample or self.resample
915
+ image_mean = image_mean or self.image_mean
916
+ image_std = image_std or self.image_std
917
+ do_convert_rgb = do_convert_rgb or self.do_convert_rgb
918
+
919
+ patch_size = patch_size or self.patch_size
920
+ pooling_size = pooling_size or self.pooling_size
921
+
922
+ image_pooling_h, image_pooling_w = pooling_size
923
+
924
+ batch_grids = []
925
+ batch_crops = []
926
+ batch_pooled_patches_idx = []
927
+
928
+ for video in videos:
929
+ all_crops = []
930
+ pooled_patches_idx = []
931
+
932
+ for frame in video:
933
+ image_grid, crops, pooled_idx = image_to_patches_and_grids(
934
+ frame,
935
+ base_image_input_size,
936
+ resample,
937
+ image_mean,
938
+ image_std,
939
+ patch_size,
940
+ image_pooling_w,
941
+ image_pooling_h,
942
+ )
943
+ offset = sum(np.prod(x.shape[:2]) for x in all_crops)
944
+ pooled_idx_with_offset = np.where(pooled_idx >= 0, pooled_idx + offset, pooled_idx)
945
+ pooled_patches_idx.append(pooled_idx_with_offset)
946
+ all_crops.append(crops)
947
+
948
+ video_grid = np.array([len(video), image_grid[0], image_grid[1]])
949
+ all_crops = np.concatenate(all_crops, 0)
950
+ pooled_patches_idx = np.concatenate(pooled_patches_idx, 0)
951
+
952
+ batch_grids.append(video_grid)
953
+ batch_crops.append(all_crops)
954
+ batch_pooled_patches_idx.append(pooled_patches_idx)
955
+
956
+ video_grids = np.stack(batch_grids, 0)
957
+ pixel_values_videos = np.concatenate(batch_crops, 0)
958
+ video_token_pooling = np.concatenate(batch_pooled_patches_idx, 0)
959
+
960
+ data = BatchFeature(dict(
961
+ pixel_values_videos=pixel_values_videos,
962
+ video_token_pooling=video_token_pooling,
963
+ video_grids=video_grids,
964
+ ), tensor_type=return_tensors)
965
+ if return_pointing_metadata:
966
+ t = pixel_values_videos.shape[0]
967
+ assert base_image_input_size[0] % self.patch_size == 0
968
+ assert base_image_input_size[1] % self.patch_size == 0
969
+ crop_w = base_image_input_size[0] // self.patch_size
970
+ crop_h = base_image_input_size[1] // self.patch_size
971
+ data["subpatch_mapping"] = np.arange(t*crop_w*crop_h).reshape([t, crop_h, crop_w])
972
+ data["video_token_pooling_np"] = video_token_pooling
973
+ return data
974
+
975
+
976
+ Molmo2VideoProcessor.register_for_auto_class()
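To make the default sampling concrete, here is a standalone restatement of the `uniform_last_frame` index selection from `Molmo2VideoProcessor.sample_frames` above, applied to a hypothetical 30 fps, 60-second clip (the clip parameters are illustrative, not taken from this repository):

```python
import numpy as np

total_num_frames, video_fps = 1800, 30.0   # hypothetical 60 s clip at 30 fps
num_frames, max_fps = 384, 2.0             # defaults from the processor config

duration = total_num_frames / video_fps
if duration > (num_frames - 1) / max_fps:
    # Too long to cover at max_fps: fall back to uniform sampling across the clip.
    indices = np.linspace(
        0, total_num_frames - 1, num=min(num_frames, total_num_frames), endpoint=True
    ).astype(int)
else:
    # Step through at max_fps and force-include the last frame.
    float_indices = np.arange(0.0, stop=total_num_frames - 1, step=video_fps / max_fps)
    if np.round(float_indices[-1]) != total_num_frames - 1:
        float_indices = np.concatenate([float_indices, [total_num_frames - 1]])
    indices = np.round(float_indices).astype(int)

print(len(indices), indices[:3], indices[-1])   # 121 [ 0 15 30] 1799
```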
vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
wilddet3d_alldata_all_prompt_v1.0.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8b6a9e548f733ba62625a0d2adc4b0f4fdb6007ee11d9927f9c1027010fee57
3
+ size 4733213037