| from typing import List, Optional, Union |
|
|
| import numpy as np |
|
|
| from transformers import BatchFeature |
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
| from transformers.image_utils import ImageInput, VideoInput |
| from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack, VideosKwargs |
|
|
|
|
| class HithinkOmniVideosProcessorKwargs(VideosKwargs, total=False): |
| fps: Union[List[float], float] |
|
|
|
|
| class HithinkOmniProcessorKwargs(ProcessingKwargs, total=False): |
| videos_kwargs: HithinkOmniVideosProcessorKwargs |
| _defaults = { |
| "text_kwargs": { |
| "padding": False, |
| }, |
| "videos_kwargs": {"fps": 2.0}, |
| } |
|
|
|
|
| class HithinkOmniProcessor(ProcessorMixin): |
| r""" |
    Constructs a HithinkOmni processor which wraps a Qwen2.5-VL image processor, a Whisper feature extractor and a
    HithinkOmni tokenizer into a single processor.
    [`HithinkOmniProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`], [`WhisperFeatureExtractor`]
    and [`PreTrainedTokenizerFast`]. See the [`~HithinkOmniProcessor.__call__`] and [`~HithinkOmniProcessor.decode`]
    for more information.
| Args: |
| image_processor ([`Qwen2VLImageProcessor`], *optional*): |
| The image processor is a required input. |
| feature_extractor ([`WhisperFeatureExtractor`], *optional*): |
| The feature extractor is a required input. |
| tokenizer ([`PreTrainedTokenizerFast`], *optional*): |
| The tokenizer is a required input. |
| chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages |
| in a chat into a tokenizable string. |
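
    Example (an illustrative sketch; the checkpoint path below is a placeholder, not a released model):

    ```python
    >>> import numpy as np

    >>> processor = HithinkOmniProcessor.from_pretrained("path/to/hithink-omni-checkpoint")
    >>> image = np.zeros((224, 224, 3), dtype=np.uint8)
    >>> inputs = processor(text="Describe this image: <|image_pad|>", images=image, return_tensors="pt")
    ```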
| """ |
|
|
| attributes = ["image_processor", "feature_extractor", "tokenizer"] |
| valid_kwargs = ["chat_template"] |
|
|
| image_processor_class = "Qwen2VLImageProcessor" |
| feature_extractor_class = "WhisperFeatureExtractor" |
| tokenizer_class = "PreTrainedTokenizerFast" |
|
|
    def __init__(self, image_processor=None, feature_extractor=None, tokenizer=None, chat_template=None, **kwargs):
        # Restrict the tokenizer outputs to the text fields (drops e.g. `token_type_ids`);
        # the audio mask is exposed separately as `feature_attention_mask`.
        tokenizer.model_input_names = ["input_ids", "attention_mask"]
        super().__init__(image_processor, feature_extractor, tokenizer, chat_template=chat_template)
        self.image_token = getattr(tokenizer, "image_token", "<|image_pad|>")
        self.video_token = getattr(tokenizer, "video_token", "<|video_pad|>")
        # Fall back to the tokenizer's chat template when none is passed explicitly.
        self.chat_template = tokenizer.chat_template if chat_template is None else chat_template
|
|
| def __call__( |
| self, |
| images: ImageInput = None, |
| text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, |
| videos: VideoInput = None, |
| audios: Union[np.ndarray, List[np.ndarray]] = None, |
| sampling_rate: Optional[int] = None, |
| **kwargs: Unpack[HithinkOmniProcessorKwargs], |
| ) -> BatchFeature: |
| """ |
        Main method to prepare one or several sequence(s), image(s), video(s) and audio(s) for the model. This method
        forwards the `text` and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`]
        if `text` is not `None` to encode the text. To prepare the vision inputs, this method forwards the `images`,
        `videos` and `kwargs` arguments to Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `images` or
        `videos` is not `None`; audio inputs are forwarded to WhisperFeatureExtractor's
        [`~WhisperFeatureExtractor.__call__`] if `audios` is not `None`.
| |
| Args: |
| images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): |
| The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch |
| tensor. Both channels-first and channels-last formats are supported. |
| text (`str`, `List[str]`, `List[List[str]]`): |
| The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings |
| (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set |
| `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). |
            videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
                tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
            audios (`np.ndarray`, `List[np.ndarray]`):
                The audio or batch of audios to be prepared. Each audio can be a NumPy array.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the audio files were digitized, expressed in hertz (Hz).
| return_tensors (`str` or [`~utils.TensorType`], *optional*): |
| If set, will return tensors of a particular framework. Acceptable values are: |
| - `'tf'`: Return TensorFlow `tf.constant` objects. |
| - `'pt'`: Return PyTorch `torch.Tensor` objects. |
| - `'np'`: Return NumPy `np.ndarray` objects. |
| - `'jax'`: Return JAX `jnp.ndarray` objects. |
| |
| Returns: |
| [`BatchFeature`]: A [`BatchFeature`] with the following fields: |
| |
| - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. |
| - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when |
| `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not |
| `None`). |
| - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. |
| - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`. |
            - **image_grid_thw** -- 3D grid (temporal, height, width) of each image. Returned when `images` is not `None`.
            - **video_grid_thw** -- 3D grid (temporal, height, width) of each video. Returned when `videos` is not `None`.
            - **second_per_grid_ts** -- Seconds covered by each temporal grid step of each video. Returned when `videos` is not `None`.
            - **input_features** -- Audio mel features to be fed to a model. Returned when `audios` is not `None`.
            - **feature_attention_mask** -- Attention mask for the audio features. Returned when `audios` is not `None`.
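
        Example (an illustrative sketch; the checkpoint path is a placeholder, not a released model):

        ```python
        >>> import numpy as np

        >>> processor = HithinkOmniProcessor.from_pretrained("path/to/hithink-omni-checkpoint")
        >>> audio = np.zeros(16000, dtype=np.float32)  # one second of silence
        >>> inputs = processor(text="Transcribe this clip: <|AUDIO|>", audios=audio, sampling_rate=16000, return_tensors="pt")
        ```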
| """ |
| output_kwargs = self._merge_kwargs( |
| HithinkOmniProcessorKwargs, |
| tokenizer_init_kwargs=self.tokenizer.init_kwargs, |
| **kwargs, |
| ) |
| if images is not None: |
| image_inputs = self.image_processor(images=images, videos=None, **output_kwargs["images_kwargs"]) |
| image_grid_thw = image_inputs["image_grid_thw"] |
| else: |
| image_inputs = {} |
| image_grid_thw = None |
|
|
| if videos is not None: |
| videos_inputs = self.image_processor(images=None, videos=videos, **output_kwargs["images_kwargs"]) |
| video_grid_thw = videos_inputs["video_grid_thw"] |
|
|
| fps = output_kwargs["videos_kwargs"].pop("fps", 2.0) |
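            # `second_per_grid_ts[i]` is the wall-clock time (in seconds) covered by one temporal
            # grid step of video i: `temporal_patch_size` frames sampled at `fps` frames per second.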
| if isinstance(fps, (int, float)): |
| second_per_grid_ts = [self.image_processor.temporal_patch_size / fps] * len(video_grid_thw) |
| elif hasattr(fps, "__len__") and len(fps) == len(video_grid_thw): |
| second_per_grid_ts = [self.image_processor.temporal_patch_size / tmp for tmp in fps] |
| else: |
| raise ValueError( |
| f"The length of fps ({len(fps) if hasattr(fps, '__len__') else fps}) must be equal to the length of video_grid_thw ({len(video_grid_thw)}) or fps should be a single number." |
| ) |
| videos_inputs.update({"second_per_grid_ts": second_per_grid_ts}) |
|
|
| else: |
| videos_inputs = {} |
| video_grid_thw = None |
|
|
| if not isinstance(text, list): |
| text = [text] |
|
|
| if image_grid_thw is not None: |
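            # Expand each image pad token to one token per visual token the model will receive:
            # grid_t * grid_h * grid_w patches, merged spatially by `merge_size**2`. The temporary
            # `<|placeholder|>` marker prevents re-matching tokens that were just inserted.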
| merge_length = self.image_processor.merge_size**2 |
| index = 0 |
| for i in range(len(text)): |
| while self.image_token in text[i]: |
| text[i] = text[i].replace( |
| self.image_token, |
| "<|placeholder|>" * (image_grid_thw[index].prod() // merge_length), |
| 1, |
| ) |
| index += 1 |
| text[i] = text[i].replace("<|placeholder|>", self.image_token) |
|
|
| if video_grid_thw is not None: |
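            # Same expansion for the video pad tokens.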
| merge_length = self.image_processor.merge_size**2 |
| index = 0 |
| for i in range(len(text)): |
| while self.video_token in text[i]: |
| text[i] = text[i].replace( |
| self.video_token, |
| "<|placeholder|>" * (video_grid_thw[index].prod() // merge_length), |
| 1, |
| ) |
| index += 1 |
| text[i] = text[i].replace("<|placeholder|>", self.video_token) |
|
|
        if audios is not None:
            audio_kwargs = output_kwargs["audio_kwargs"]
            # Whisper features are extracted over fixed 30-second windows.
            audio_kwargs.setdefault("padding", "max_length")
            audio_kwargs.setdefault("return_attention_mask", True)
            audio_inputs = self.feature_extractor(audios, sampling_rate=sampling_rate, **audio_kwargs)
            # Rename the mask so it is not confused with the text `attention_mask`.
            audio_inputs["feature_attention_mask"] = audio_inputs.pop("attention_mask")
            # Number of encoder tokens each clip yields after the convolutional downsampling.
            audio_output_lengths = self.get_feat_extract_output_lengths(
                audio_inputs["feature_attention_mask"].sum(-1)
            )
            index = 0
            for i in range(len(text)):
                while "<|AUDIO|>" in text[i]:
                    text[i] = text[i].replace(
                        "<|AUDIO|>", "<|placeholder|>" * audio_output_lengths[index], 1
                    )
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", "<|AUDIO|>")
        else:
            audio_inputs = {}
|
|
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
|
|
| return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs, **audio_inputs}) |
|
|
| @staticmethod |
| def get_feat_extract_input_length(audio_length): |
| """ |
        Computes the length of the audio encoder input, i.e. the output length of the feature extractor,
        from the number of raw audio samples. E.g. a 30-second clip has 480,000 samples at
        sampling_rate = 16,000, giving a feature length of 3,000.
| """ |
| return int(np.ceil((audio_length - 40) / 160)) |
|
|
| @staticmethod |
| def get_feat_extract_output_lengths(input_lengths): |
| """ |
        Computes the output length of the audio encoder, i.e. the feature length after its two
        stride-2 convolutional downsampling steps.
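        Each step roughly halves the sequence: e.g. a feature length of 3,000 (30 s of audio)
        becomes 1,500 after the first step and 750 encoder tokens after the second.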
| """ |
| input_lengths = (input_lengths - 1) // 2 + 1 |
| output_lengths = (input_lengths - 2) // 2 + 1 |
| return output_lengths |
|
|
    def featurize_audio_chunk(self, audio: np.ndarray, is_last: bool, n_extracted_frames: int = 0, **kwargs):
        """
        Extracts the features of the not-yet-extracted frames of an audio chunk during streaming inference.
        Returns the new feature frames, or `None` if the chunk does not yet contain any complete new frame.
        """
        # Number of complete feature frames available so far (160-sample hops beyond the
        # 40-sample overhang); only round up on the final chunk, whose tail is zero-padded below.
        n_frames = (len(audio) - 40) / 160
        n_frames = int(np.ceil(n_frames) if is_last else np.floor(n_frames))
        n_new_frames = n_frames - n_extracted_frames
        i_end = n_frames * 160 + 40
        # Start two frames early so the first new frames get their full left context.
        i_start = max(0, (n_extracted_frames + 1 - 3) * 160)
        if n_new_frames <= 0 or n_frames < 2:
            return
        a = audio[i_start:i_end]
        # On the last chunk, zero-pad the tail up to a whole 160-sample hop.
        if is_last and (n_pad := int(np.ceil(len(a) / 160)) * 160 - len(a)):
            a = np.pad(a, [0, n_pad])
        features = self.feature_extractor(
            a, sampling_rate=self.feature_extractor.sampling_rate, padding="do_not_pad", **kwargs
        )["input_features"]
        # Drop the context frames and return only the newly extracted ones.
        return features[:, :, -n_new_frames:]
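
    # Illustrative streaming loop (a sketch; `stream`, `chunk` and `processor` are hypothetical):
    #
    #     n_extracted, buffer = 0, np.zeros(0, dtype=np.float32)
    #     for chunk, is_last in stream:
    #         buffer = np.concatenate([buffer, chunk])
    #         feats = processor.featurize_audio_chunk(buffer, is_last, n_extracted)
    #         if feats is not None:
    #             n_extracted += feats.shape[-1]  # feed `feats` to the model incrementally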
|
|
| def batch_decode(self, *args, **kwargs): |
| """ |
        This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.batch_decode`].
        Please refer to the docstring of this method for more information.
| """ |
| return self.tokenizer.batch_decode(*args, **kwargs) |
|
|
| def decode(self, *args, **kwargs): |
| """ |
        This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.decode`].
        Please refer to the docstring of this method for more information.
| """ |
| return self.tokenizer.decode(*args, **kwargs) |
|
|
| def post_process_image_text_to_text(self, generated_outputs): |
| """ |
| Post-process the output of the model to decode the text. |
| |
| Args: |
| generated_outputs (`torch.Tensor` or `np.ndarray`): |
| The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` |
| or `(sequence_length,)`. |
| |
| Returns: |
| `List[str]`: The decoded text. |
| """ |
| return self.tokenizer.batch_decode( |
| generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False |
| ) |
|
|
| @property |
| def model_input_names(self): |
| tokenizer_input_names = self.tokenizer.model_input_names |
| image_processor_input_names = self.image_processor.model_input_names |
| feature_extractor_input_names = self.feature_extractor.model_input_names |
| return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names |
| + feature_extractor_input_names + ["feature_attention_mask"])) |
|
|