import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor
from transformers.image_utils import to_numpy_array, PILImageResampling, ChannelDimension
from transformers.image_transforms import resize, to_channel_dimension_format
from typing import Any, Dict
|
|
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
class ImageToTextPipeline:
    def __init__(self, model_path: str):
        # IDEFICS-style checkpoints ship custom modeling and processing code,
        # so trust_remote_code is required for both the processor and the model.
        self.PROCESSOR = AutoProcessor.from_pretrained(
            model_path,
            trust_remote_code=True,
        )
        self.MODEL = AutoModelForCausalLM.from_pretrained(
            model_path,
            trust_remote_code=True,
            torch_dtype=torch.bfloat16,
        ).to(DEVICE)
        # One <image> placeholder token per latent of the perceiver resampler.
        self.image_seq_len = self.MODEL.config.perceiver_config.resampler_n_latents
        self.BOS_TOKEN = self.PROCESSOR.tokenizer.bos_token
        # Token ids the model must never emit as generated text.
        self.BAD_WORDS_IDS = self.PROCESSOR.tokenizer(
            ["<image>", "<fake_token_around_image>"], add_special_tokens=False
        ).input_ids

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Generate a text description for the image at data["file"]."""
        image = Image.open(data["file"]).convert("RGB")
        # IDEFICS prompt format: the <image> placeholder is repeated
        # image_seq_len times and wrapped in <fake_token_around_image> markers.
        inputs = self.PROCESSOR.tokenizer(
            f"{self.BOS_TOKEN}<fake_token_around_image>{'<image>' * self.image_seq_len}<fake_token_around_image>",
            return_tensors="pt",
            add_special_tokens=False,
        )
        inputs["pixel_values"] = self.PROCESSOR.image_processor([image], transform=self.custom_transform)
        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

        generated_ids = self.MODEL.generate(**inputs, bad_words_ids=self.BAD_WORDS_IDS, max_length=4096)
        generated_text = self.PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)[0]
        return {"text": generated_text}

    def convert_to_rgb(self, image):
        # Composite transparent images onto a white background before dropping
        # the alpha channel, so transparent regions do not turn black.
        if image.mode == "RGB":
            return image
        image_rgba = image.convert("RGBA")
        background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
        alpha_composite = Image.alpha_composite(background, image_rgba)
        return alpha_composite.convert("RGB")

    def custom_transform(self, x):
        # Resize to a fixed 960x960, rescale pixel values to [0, 1], normalize
        # with the processor's mean/std, and move channels first for the model.
        x = self.convert_to_rgb(x)
        x = to_numpy_array(x)
        x = resize(x, (960, 960), resample=PILImageResampling.BILINEAR)
        x = self.PROCESSOR.image_processor.rescale(x, scale=1 / 255)
        x = self.PROCESSOR.image_processor.normalize(
            x,
            mean=self.PROCESSOR.image_processor.image_mean,
            std=self.PROCESSOR.image_processor.image_std,
        )
        x = to_channel_dimension_format(x, ChannelDimension.FIRST)
        return torch.tensor(x)
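

# Minimal usage sketch. Assumptions not in the original: the checkpoint path
# below is a placeholder for an IDEFICS-style model, and "sample.jpg" is a
# local image file.
if __name__ == "__main__":
    pipeline = ImageToTextPipeline("HuggingFaceM4/idefics-9b")  # placeholder path
    result = pipeline({"file": "sample.jpg"})  # hypothetical input image
    print(result["text"])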