| """ |
| ADOBE CONFIDENTIAL |
| Copyright 2024 Adobe |
| All Rights Reserved. |
| NOTICE: All information contained herein is, and remains |
| the property of Adobe and its suppliers, if any. The intellectual |
| and technical concepts contained herein are proprietary to Adobe |
| and its suppliers and are protected by all applicable intellectual |
| property laws, including trade secret and copyright laws. |
| Dissemination of this information or reproduction of this material |
| is strictly forbidden unless prior written permission is obtained |
| from Adobe. |
| """ |
|
|
| from typing import Callable, List, Optional, Union |
| import inspect |
| import einops |
| import PIL.Image |
| import numpy as np |
| import torch as th |
|
|
| from diffusers import DiffusionPipeline |
| from diffusers.image_processor import VaeImageProcessor |
| from diffusers.models import AutoencoderKL, UNet2DConditionModel |
| from diffusers.schedulers import KarrasDiffusionSchedulers |
| from diffusers.utils.torch_utils import randn_tensor |
| from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput |
|
|
| from analogy_encoder import AnalogyEncoder |
| from analogy_projector import AnalogyProjector |
| from analogy_input_processor import AnalogyInputProcessor |
|
|
| class PatternAnalogyTrifuser(DiffusionPipeline): |
| r""" |
| This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the |
| library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) |
| """ |
|
|
| model_cpu_offload_seq = "bert->unet->vqvae" |
|
|
| analogy_input_processor: AnalogyInputProcessor |
| analogy_encoder: AnalogyEncoder |
| analogy_projector: AnalogyProjector |
| unet: UNet2DConditionModel |
| vae: AutoencoderKL |
| scheduler: KarrasDiffusionSchedulers |
| |
| def __init__(self, |
| analogy_input_processor: AnalogyInputProcessor, |
| analogy_projector: AnalogyProjector, |
| analogy_encoder: AnalogyEncoder, |
| unet: UNet2DConditionModel, |
| vae: AutoencoderKL, |
| scheduler: KarrasDiffusionSchedulers,): |
| |
| |
| super().__init__() |
| self.register_modules( |
| analogy_input_processor=analogy_input_processor, |
| analogy_encoder=analogy_encoder, |
| analogy_projector=analogy_projector, |
| unet=unet, |
| vae=vae, |
| scheduler=scheduler, |
| ) |
| self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) |
| self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) |
|
|
| |
| def check_inputs(self, analogy_prompt, negative_analogy_prompt, height, width, callback_steps): |
| if ( |
| not isinstance(analogy_prompt, th.Tensor) |
| and not isinstance(analogy_prompt, PIL.Image.Image) |
| and not isinstance(analogy_prompt, list) |
| ): |
| raise ValueError( |
| "`analogy_prompt` contents have to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" |
| f" {type(analogy_prompt)}" |
| ) |
| if not negative_analogy_prompt is None: |
| if ( |
| not isinstance(negative_analogy_prompt, th.Tensor) |
| and not isinstance(negative_analogy_prompt, PIL.Image.Image) |
| and not isinstance(negative_analogy_prompt, list) |
| ): |
| raise ValueError( |
| "`negative_analogy_prompt` contents have to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" |
| f" {type(negative_analogy_prompt)}" |
| ) |
|
|
|
|
| if height % 8 != 0 or width % 8 != 0: |
| raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") |
|
|
| if (callback_steps is None) or ( |
| callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) |
| ): |
| raise ValueError( |
| f"`callback_steps` has to be a positive integer but is {callback_steps} of type" |
| f" {type(callback_steps)}." |
| ) |
|
|
| |
| def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): |
| shape = ( |
| batch_size, |
| num_channels_latents, |
| int(height) // self.vae_scale_factor, |
| int(width) // self.vae_scale_factor, |
| ) |
| if isinstance(generator, list) and len(generator) != batch_size: |
| raise ValueError( |
| f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" |
| f" size of {batch_size}. Make sure the batch size matches the length of the generators." |
| ) |
|
|
| if latents is None: |
| latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) |
| else: |
| latents = latents.to(device) |
|
|
| |
| latents = latents * self.scheduler.init_noise_sigma |
| return latents |
| |
| |
| def prepare_extra_step_kwargs(self, generator, eta): |
| |
| |
| |
| |
|
|
| accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) |
| extra_step_kwargs = {} |
| if accepts_eta: |
| extra_step_kwargs["eta"] = eta |
|
|
| |
| accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) |
| if accepts_generator: |
| extra_step_kwargs["generator"] = generator |
| return extra_step_kwargs |
|
|
| |
| def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): |
| shape = ( |
| batch_size, |
| num_channels_latents, |
| int(height) // self.vae_scale_factor, |
| int(width) // self.vae_scale_factor, |
| ) |
| if isinstance(generator, list) and len(generator) != batch_size: |
| raise ValueError( |
| f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" |
| f" size of {batch_size}. Make sure the batch size matches the length of the generators." |
| ) |
|
|
| if latents is None: |
| latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) |
| else: |
| latents = latents.to(device) |
|
|
| |
| latents = latents * self.scheduler.init_noise_sigma |
| return latents |
| |
| def _encode_prompt(self, analogy_prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): |
| r""" |
| Encodes the prompt into text encoder hidden states. |
| |
| Args: |
| prompt (`str` or `List[str]`): |
| prompt to be encoded |
| device: (`torch.device`): |
| torch device |
| num_images_per_prompt (`int`): |
| number of images that should be generated per prompt |
| do_classifier_free_guidance (`bool`): |
| whether to use classifier free guidance or not |
| negative_prompt (`str` or `List[str]`): |
| The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored |
| if `guidance_scale` is less than `1`). |
| """ |
| weight_dtype = self.unet.dtype |
| dino_input, siglip_input = self.analogy_input_processor(analogy_prompt) |
| dino_input = dino_input.to(device=device).to(dtype=weight_dtype) |
| siglip_input = siglip_input.to(device=device).to(dtype=weight_dtype) |
| batch_size = dino_input.shape[1] |
| dino_input_reshaped = einops.rearrange(dino_input, "k b c h w -> (k b) c h w") |
| siglip_input_reshaped = einops.rearrange(siglip_input, "k b c h w -> (k b) c h w") |
| dino_enc, siglip_enc = self.analogy_encoder(dino_input_reshaped, siglip_input_reshaped) |
| image_embeddings = self.analogy_projector(dino_enc, siglip_enc, batch_size) |
| |
| |
| bs_embed, seq_len, _ = image_embeddings.shape |
| image_embeddings = image_embeddings.repeat(num_images_per_prompt, 1, 1) |
| |
| if do_classifier_free_guidance: |
| uncond_images: List[str] |
| if negative_prompt is None: |
| uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size |
| elif type(negative_prompt) is not type(analogy_prompt): |
| raise TypeError( |
| f"`negative_prompt` should be the same type to `prompt`, but got {type(analogy_prompt)} !=" |
| f" {type(negative_prompt)}." |
| ) |
| elif isinstance(negative_prompt, PIL.Image.Image): |
| uncond_images = [negative_prompt] |
| elif batch_size != len(negative_prompt): |
| raise ValueError( |
| f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" |
| f" {analogy_prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" |
| " the batch size of `prompt`." |
| ) |
| else: |
| uncond_images = negative_prompt |
| dino_neg, siglip_neg = self.analogy_input_processor.get_negative(dino_input, siglip_input) |
| |
| dino_neg = dino_neg.to(device=device).to(dtype=weight_dtype) |
| siglip_neg = siglip_neg.to(device=device).to(dtype=weight_dtype) |
| dino_neg_reshaped = einops.rearrange(dino_neg, "k b c h w -> (k b) c h w") |
| siglip_neg_reshaped = einops.rearrange(siglip_neg, "k b c h w -> (k b) c h w") |
| dino_neg_enc, siglip_neg_enc = self.analogy_encoder(dino_neg_reshaped, siglip_neg_reshaped) |
| negative_prompt_embeds = self.analogy_projector(dino_neg_enc, siglip_neg_enc, batch_size) |
| |
| negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1, 1) |
| image_embeddings = th.cat([negative_prompt_embeds, image_embeddings]) |
|
|
|
|
| return image_embeddings |
| |
| @th.no_grad() |
| def __call__( |
| self, |
| analogy_prompt: Union[str, List[str]] = None, |
| num_inference_steps: int = 50, |
| guidance_scale: float = 7.5, |
| height: Optional[int] = None, |
| width: Optional[int] = None, |
| negative_analogy_prompt: Optional[Union[str, List[str]]] = None, |
| num_images_per_prompt: Optional[int] = 1, |
| eta: float = 0.0, |
| generator: Optional[Union[th.Generator, List[th.Generator]]] = None, |
| latents: Optional[th.FloatTensor] = None, |
| output_type: Optional[str] = "pil", |
| return_dict: bool = True, |
| callback: Optional[Callable[[int, int, th.Tensor], None]] = None, |
| callback_steps: int = 1, |
| start_step: int = 0, |
| ): |
| r""" |
| The call function to the pipeline for generation. |
| |
| Args: |
| analogy_prompt (`List[Tuple[PIL.Image.Image]]'): |
| The analogy sequence A, A*, B which is our model's prompt for generating B* the analogical pattern satisfying A:A*::B:B*. |
| height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): |
| The height in pixels of the generated image. |
| width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): |
| The width in pixels of the generated image. |
| num_inference_steps (`int`, *optional*, defaults to 50): |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the |
| expense of slower inference. |
| guidance_scale (`float`, *optional*, defaults to 7.5): |
| A higher guidance scale value encourages the model to generate images closely linked to the text |
| `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. |
| negative_prompt (`str` or `List[str]`, *optional*): |
| The prompt or prompts to guide what to not include in image generation. If not defined, you need to |
| pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). |
| num_images_per_prompt (`int`, *optional*, defaults to 1): |
| The number of images to generate per prompt. |
| eta (`float`, *optional*, defaults to 0.0): |
| Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies |
| to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. |
| generator (`torch.Generator`, *optional*): |
| A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make |
| generation deterministic. |
| latents (`torch.Tensor`, *optional*): |
| Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents |
| tensor is generated by sampling using the supplied random `generator`. |
| output_type (`str`, *optional*, defaults to `"pil"`): |
| The output format of the generated image. Choose between `PIL.Image` or `np.array`. |
| return_dict (`bool`, *optional*, defaults to `True`): |
| Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a |
| plain tuple. |
| callback (`Callable`, *optional*): |
| A function that calls every `callback_steps` steps during inference. The function is called with the |
| following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. |
| callback_steps (`int`, *optional*, defaults to 1): |
| The frequency at which the `callback` function is called. If not specified, the callback is called at |
| every step. |
| |
| Examples: |
| |
| ```py |
| import requests |
| import torch as th |
| from PIL import Image |
| from io import BytesIO |
| import matplotlib.pyplot as plt |
| from PIL import Image, ImageOps |
| from diffusers import DiffusionPipeline |
| |
| SEED = 1729 |
| DEVICE = th.device("cuda") |
| DTYPE = th.float16 |
| FIG_K = 3 |
| EXAMPLE_ID = 0 |
| |
| # Now we need to do the trick |
| pretrained_path = "bardofcodes/pattern_analogies" |
| new_pipe = DiffusionPipeline.from_pretrained( |
| pretrained_path, |
| custom_pipeline=pretrained_path, |
| trust_remote_code=True |
| ) |
| |
| img_urls = [ |
| f"https://huggingface.co/bardofcodes/pattern_analogies/resolve/main/examples/{EXAMPLE_ID}_a.png", |
| f"https://huggingface.co/bardofcodes/pattern_analogies/resolve/main/examples/{EXAMPLE_ID}_a_star.png", |
| f"https://huggingface.co/bardofcodes/pattern_analogies/resolve/main/examples/{EXAMPLE_ID}_b.png", |
| ] |
| images = [] |
| for url in img_urls: |
| response = requests.get(url) |
| image = Image.open(BytesIO(response.content)).convert("RGB") |
| images.append(image) |
| |
| pipe_input = [tuple(images)] |
| |
| pipe = new_pipe.to(DEVICE, DTYPE) |
| var_images = pipe(pipe_input, num_inference_steps=50, num_images_per_prompt=3,).images |
| |
| plt.figure(figsize=(3*FIG_K, 2*FIG_K)) |
| plt.axis('off') |
| plt.legend(framealpha=1) |
| plt.rcParams['legend.fontsize'] = 'large' |
| for i in range(6): |
| if i == 0: |
| plt.subplot(2, 3, i+1) |
| val_image = img1 |
| label_str = "A" |
| elif i == 1: |
| plt.subplot(2, 3, i+1) |
| val_image = alt_img1 |
| label_str = "A*" |
| elif i == 2: |
| plt.subplot(2, 3, i+1) |
| val_image = img2 |
| label_str = "Target" |
| else: |
| plt.subplot(2, 3,i + 1) |
| val_image = var_images[i-3] |
| label_str = f"Variation {i-2}" |
| |
| val_image = ImageOps.expand(val_image,border=2,fill='black') |
| plt.imshow(val_image) |
| plt.scatter([], [], c="r", label=label_str) |
| plt.legend(loc="lower right") |
| plt.axis('off') |
| plt.subplots_adjust(wspace=0.01, hspace=0.01) |
| ``` |
| |
| Returns: |
| [`~ImagePipelineOutput`] or `tuple` |
| The generated image(s) as a [`~ImagePipelineOutput`] or a tuple of images. |
| """ |
|
|
| |
| height = height or self.unet.config.sample_size * self.vae_scale_factor |
| width = width or self.unet.config.sample_size * self.vae_scale_factor |
|
|
| |
| self.check_inputs(analogy_prompt, negative_analogy_prompt, height, width, callback_steps) |
| |
| |
| if isinstance(analogy_prompt, list): |
| batch_size = len(analogy_prompt) |
| elif isinstance(analogy_prompt, tuple): |
| batch_size = 1 |
| else: |
| raise ValueError( |
| f"`analogy_prompt` has to be a list of images or a tuple of images but is of type {type(analogy_prompt)}" |
| ) |
| device = self._execution_device |
| |
| |
| |
| do_classifier_free_guidance = guidance_scale > 1.0 |
|
|
| |
| analogy_embeddings = self._encode_prompt( |
| analogy_prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_analogy_prompt |
| ) |
|
|
| |
| self.scheduler.set_timesteps(num_inference_steps, device=device) |
| |
| timesteps = self.scheduler.timesteps |
| |
| timesteps = timesteps[start_step:] |
| |
| num_channels_latents = self.unet.config.in_channels |
| latents = self.prepare_latents( |
| batch_size * num_images_per_prompt, |
| num_channels_latents, |
| height, |
| width, |
| analogy_embeddings.dtype, |
| device, |
| generator, |
| latents, |
| ) |
|
|
| |
| extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) |
|
|
| |
| for i, t in enumerate(self.progress_bar(timesteps)): |
| |
| latent_model_input = th.cat([latents] * 2) if do_classifier_free_guidance else latents |
| latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) |
|
|
| |
| noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=analogy_embeddings).sample |
|
|
| |
| if do_classifier_free_guidance: |
| noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) |
| noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) |
|
|
| |
| latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample |
|
|
| |
| if callback is not None and i % callback_steps == 0: |
| step_idx = i // getattr(self.scheduler, "order", 1) |
| callback(step_idx, t, latents) |
|
|
| if not output_type == "latent": |
| image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] |
| else: |
| image = latents |
|
|
| image = self.image_processor.postprocess(image, output_type=output_type) |
|
|
| if not return_dict: |
| return (image,) |
|
|
| return ImagePipelineOutput(images=image) |