import numpy as np
import torch
from typing import Any, Callable, Dict, List, Optional, Union

from accelerate import Accelerator
from diffusers.models import AutoencoderKL, FluxTransformer2DModel
from diffusers.pipelines import FluxPipeline
from diffusers.pipelines.flux.pipeline_flux import (
    FluxPipelineOutput,
    calculate_shift,
    retrieve_timesteps,
)
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast

from .condition import Condition


class SubjectGeniusPipeline(FluxPipeline):
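    """
    FluxPipeline subclass that forwards extra `Condition` inputs to the transformer at every
    denoising step.

    Minimal usage sketch (illustrative only: the checkpoint id is an assumption, the `Condition`
    constructor shown is hypothetical, and the loaded transformer must already accept the extra
    condition keyword arguments used in `__call__`):

    ```py
    pipe = SubjectGeniusPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
    ).to("cuda")
    condition = Condition("subject", subject_image)  # hypothetical signature
    images = pipe(
        prompt="a photo of the subject on a beach",
        conditions=[condition],
        height=512,
        width=512,
    ).images
    ```
    """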
    def __init__(
        self,
        scheduler: FlowMatchEulerDiscreteScheduler,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        text_encoder_2: T5EncoderModel,
        tokenizer_2: T5TokenizerFast,
        transformer: FluxTransformer2DModel,
        image_encoder=None,
        feature_extractor=None,
    ):
        # FluxPipeline.__init__ registers every component via register_modules,
        # so this subclass only forwards them.
        super().__init__(
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            text_encoder_2=text_encoder_2,
            tokenizer_2=tokenizer_2,
            transformer=transformer,
            image_encoder=image_encoder,
            feature_extractor=feature_extractor,
        )

    @property
    def all_adapters(self):
        # Flatten the per-component adapter mapping into a deduplicated list of adapter names.
        list_adapters = self.get_list_adapters()
        all_adapters = list({adapter for adapters in list_adapters.values() for adapter in adapters})
        return all_adapters

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        prompt_2: Optional[Union[str, List[str]]] = None,
        conditions: List[Condition] = None,
        model_config: Optional[Dict[str, Any]] = None,
        condition_scale: float = 1.0,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: int = 28,
        timesteps: List[int] = None,
        guidance_scale: float = 3.5,
        num_images_per_prompt: Optional[int] = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 512,
        accelerator: Accelerator = None,
    ):
        if model_config is None:
            model_config = {}

        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor
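
        # 1. Check inputs.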
        self.check_inputs(
            prompt,
            prompt_2,
            height,
            width,
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            max_sequence_length=max_sequence_length,
        )

        self._guidance_scale = guidance_scale
        self._joint_attention_kwargs = joint_attention_kwargs
        self._interrupt = False
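
        # 2. Define call parameters.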
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]
        device = self._execution_device
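
        # 3. Encode the prompt with the CLIP (pooled) and T5 (sequence) text encoders.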
        lora_scale = (
            self.joint_attention_kwargs.get("scale", None)
            if self.joint_attention_kwargs is not None
            else None
        )
        (
            prompt_embeds,
            pooled_prompt_embeds,
            text_ids,
        ) = self.encode_prompt(
            prompt=prompt,
            prompt_2=prompt_2,
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            max_sequence_length=max_sequence_length,
            lora_scale=lora_scale,
        )
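
        # 4. Prepare packed image latents and their positional ids.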
        num_channels_latents = self.transformer.config.in_channels // 4
        latents, latent_image_ids = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )
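
        # 5. Encode each Condition into latent tokens, ids, and type metadata.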
        condition_latents, condition_ids, condition_type_ids, condition_types = ([] for _ in range(4))
        use_condition = conditions is not None and len(conditions) > 0

        if use_condition:
            for condition in conditions:
                tokens, ids, type_id = condition.encode(self)
                condition_latents.append(tokens)
                condition_ids.append(ids)
                condition_type_ids.append(type_id)
                condition_types.append(condition.condition_type)
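
        # 6. Prepare timesteps with the resolution-dependent shift.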
        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
        image_seq_len = latents.shape[1]
        mu = calculate_shift(
            image_seq_len,
            self.scheduler.config.base_image_seq_len,
            self.scheduler.config.max_image_seq_len,
            self.scheduler.config.base_shift,
            self.scheduler.config.max_shift,
        )
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            timesteps,
            sigmas,
            mu=mu,
        )
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)
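
        # Guidance embedding is only used by guidance-distilled Flux transformers.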
        if self.transformer.config.guidance_embeds:
            guidance = torch.full([1], guidance_scale, device=device, dtype=latents.dtype)
            guidance = guidance.expand(latents.shape[0])
        else:
            guidance = None
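
        # 7. Denoising loop: each step runs the transformer with the condition tokens attached.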
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                timestep = t.expand(latents.shape[0]).to(latents.dtype)
                # The transformer receives timesteps scaled to [0, 1] (timestep / 1000) plus the
                # encoded condition tokens when conditions are provided.
                noise_pred, conditional_output = self.transformer(
                    model_config=model_config,
                    condition_latents=condition_latents if use_condition else None,
                    condition_ids=condition_ids if use_condition else None,
                    condition_type_ids=condition_type_ids if use_condition else None,
                    condition_types=condition_types if use_condition else None,
                    return_condition_latents=model_config.get("return_condition_latents", False),
                    hidden_states=latents,
                    timestep=timestep / 1000,
                    guidance=guidance,
                    pooled_projections=pooled_prompt_embeds,
                    encoder_hidden_states=prompt_embeds,
                    txt_ids=text_ids,
                    img_ids=latent_image_ids,
                    joint_attention_kwargs=self.joint_attention_kwargs,
                    return_dict=False,
                )

                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
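
        # 8. Decode latents with the VAE unless raw latents were requested.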
        if output_type == "latent":
            image = latents
        else:
            latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
            latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
            image = self.vae.decode(latents, return_dict=False)[0]
            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models.
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, conditional_output) if model_config.get("return_condition_latents", False) else (image,)

        return FluxPipelineOutput(images=image)