Move pipeline_cogvideox_fun_inpaint.py to diffusers/

pipeline_cogvideox_fun_inpaint.py (DELETED, +0 -1244)

@@ -1,1244 +0,0 @@
# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import math
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.image_processor import VaeImageProcessor
from diffusers.models.embeddings import get_1d_rotary_pos_embed
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
from diffusers.utils import BaseOutput, logging, replace_example_docstring
from diffusers.utils.torch_utils import randn_tensor
from diffusers.video_processor import VideoProcessor
from einops import rearrange

from transformers import T5EncoderModel, T5Tokenizer
from cogvideox_transformer3d import CogVideoXTransformer3DModel
from cogvideox_vae import AutoencoderKLCogVideoX

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
    Examples:
        ```python
        pass
        ```
"""

# Copied from diffusers.models.embeddings.get_3d_rotary_pos_embed
def get_3d_rotary_pos_embed(
    embed_dim,
    crops_coords,
    grid_size,
    temporal_size,
    theta: int = 10000,
    use_real: bool = True,
    grid_type: str = "linspace",
    max_size: Optional[Tuple[int, int]] = None,
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """
    RoPE for video tokens with 3D structure.

    Args:
    embed_dim: (`int`):
        The embedding dimension size, corresponding to hidden_size_head.
    crops_coords (`Tuple[int]`):
        The top-left and bottom-right coordinates of the crop.
    grid_size (`Tuple[int]`):
        The grid size of the spatial positional embedding (height, width).
    temporal_size (`int`):
        The size of the temporal dimension.
    theta (`float`):
        Scaling factor for frequency computation.
    grid_type (`str`):
        Whether to use "linspace" or "slice" to compute grids.

    Returns:
        `torch.Tensor`: positional embedding with shape `(temporal_size * grid_size[0] * grid_size[1], embed_dim/2)`.
    """
    if use_real is not True:
        raise ValueError(" `use_real = False` is not currently supported for get_3d_rotary_pos_embed")

    if grid_type == "linspace":
        start, stop = crops_coords
        grid_size_h, grid_size_w = grid_size
        grid_h = np.linspace(start[0], stop[0], grid_size_h, endpoint=False, dtype=np.float32)
        grid_w = np.linspace(start[1], stop[1], grid_size_w, endpoint=False, dtype=np.float32)
        grid_t = np.linspace(0, temporal_size, temporal_size, endpoint=False, dtype=np.float32)
    elif grid_type == "slice":
        max_h, max_w = max_size
        grid_size_h, grid_size_w = grid_size
        grid_h = np.arange(max_h, dtype=np.float32)
        grid_w = np.arange(max_w, dtype=np.float32)
        grid_t = np.arange(temporal_size, dtype=np.float32)
    else:
        raise ValueError("Invalid value passed for `grid_type`.")

    # Compute dimensions for each axis
    dim_t = embed_dim // 4
    dim_h = embed_dim // 8 * 3
    dim_w = embed_dim // 8 * 3

    # Temporal frequencies
    freqs_t = get_1d_rotary_pos_embed(dim_t, grid_t, use_real=True)
    # Spatial frequencies for height and width
    freqs_h = get_1d_rotary_pos_embed(dim_h, grid_h, use_real=True)
    freqs_w = get_1d_rotary_pos_embed(dim_w, grid_w, use_real=True)

    # Broadcast and concatenate temporal and spatial frequencies (height and width) into a 3d tensor
    def combine_time_height_width(freqs_t, freqs_h, freqs_w):
        freqs_t = freqs_t[:, None, None, :].expand(
            -1, grid_size_h, grid_size_w, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_t
        freqs_h = freqs_h[None, :, None, :].expand(
            temporal_size, -1, grid_size_w, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_h
        freqs_w = freqs_w[None, None, :, :].expand(
            temporal_size, grid_size_h, -1, -1
        )  # temporal_size, grid_size_h, grid_size_w, dim_w

        freqs = torch.cat(
            [freqs_t, freqs_h, freqs_w], dim=-1
        )  # temporal_size, grid_size_h, grid_size_w, (dim_t + dim_h + dim_w)
        freqs = freqs.view(
            temporal_size * grid_size_h * grid_size_w, -1
        )  # (temporal_size * grid_size_h * grid_size_w), (dim_t + dim_h + dim_w)
        return freqs

    t_cos, t_sin = freqs_t  # both t_cos and t_sin have shape: temporal_size, dim_t
    h_cos, h_sin = freqs_h  # both h_cos and h_sin have shape: grid_size_h, dim_h
    w_cos, w_sin = freqs_w  # both w_cos and w_sin have shape: grid_size_w, dim_w

    if grid_type == "slice":
        t_cos, t_sin = t_cos[:temporal_size], t_sin[:temporal_size]
        h_cos, h_sin = h_cos[:grid_size_h], h_sin[:grid_size_h]
        w_cos, w_sin = w_cos[:grid_size_w], w_sin[:grid_size_w]

    cos = combine_time_height_width(t_cos, h_cos, w_cos)
    sin = combine_time_height_width(t_sin, h_sin, w_sin)
    return cos, sin
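
# Worked example of the axis split above (hedged, illustrative numbers): with
# embed_dim = 64 (a typical attention head dim), dim_t = 64 // 4 = 16 and
# dim_h = dim_w = 64 // 8 * 3 = 24, so dim_t + dim_h + dim_w = 64 and each token's
# rotary frequencies are the concatenation of its time, height, and width components.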


# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid
def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
    tw = tgt_width
    th = tgt_height
    h, w = src
    r = h / w
    if r > (th / tw):
        resize_height = th
        resize_width = int(round(th / h * w))
    else:
        resize_width = tw
        resize_height = int(round(tw / w * h))

    crop_top = int(round((th - resize_height) / 2.0))
    crop_left = int(round((tw - resize_width) / 2.0))

    return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)
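
# Illustrative check (hedged): for a latent grid src = (48, 90) and a base grid of
# tgt_width = 90, tgt_height = 60, r = 48 / 90 < 60 / 90, so the grid is resized to
# 90 x 48 and centered vertically; the function returns ((6, 0), (54, 90)).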


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    """
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
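
# Hedged usage sketch: on the default path, retrieve_timesteps(scheduler, 50, device)
# simply calls scheduler.set_timesteps(50, device=device) and returns
# (scheduler.timesteps, 50); passing timesteps=[...] or sigmas=[...] instead overrides
# the scheduler's own spacing when its `set_timesteps` accepts that argument.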


def resize_mask(mask, latent, process_first_frame_only=True):
    latent_size = latent.size()
    batch_size, channels, num_frames, height, width = mask.shape

    if process_first_frame_only:
        target_size = list(latent_size[2:])
        target_size[0] = 1
        first_frame_resized = F.interpolate(
            mask[:, :, 0:1, :, :],
            size=target_size,
            mode='trilinear',
            align_corners=False
        )

        target_size = list(latent_size[2:])
        target_size[0] = target_size[0] - 1
        if target_size[0] != 0:
            remaining_frames_resized = F.interpolate(
                mask[:, :, 1:, :, :],
                size=target_size,
                mode='trilinear',
                align_corners=False
            )
            resized_mask = torch.cat([first_frame_resized, remaining_frames_resized], dim=2)
        else:
            resized_mask = first_frame_resized
    else:
        target_size = list(latent_size[2:])
        resized_mask = F.interpolate(
            mask,
            size=target_size,
            mode='trilinear',
            align_corners=False
        )
    return resized_mask
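
# Shape note (hedged example): the CogVideoX VAE keeps the first frame uncompressed and
# compresses the rest 4x in time, so a pixel-space mask of shape (B, 1, 49, 480, 720)
# resized against a latent of temporal length 13 maps frame 0 -> 1 latent frame and
# frames 1..48 -> 12 latent frames, which is why the first frame is resized separately.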


def add_noise_to_reference_video(image, ratio=None):
    if ratio is None:
        sigma = torch.normal(mean=-3.0, std=0.5, size=(image.shape[0],)).to(image.device)
        sigma = torch.exp(sigma).to(image.dtype)
    else:
        sigma = torch.ones((image.shape[0],)).to(image.device, image.dtype) * ratio

    image_noise = torch.randn_like(image) * sigma[:, None, None, None, None]
    image_noise = torch.where(image == -1, torch.zeros_like(image), image_noise)
    image = image + image_noise
    return image
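
# Hedged note: when `ratio` is None the per-sample noise scale is log-normal,
# sigma = exp(N(-3.0, 0.5)), i.e. roughly 0.05 on average, and fully masked pixels
# (value -1 in [-1, 1] space) receive no augmentation noise.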


@dataclass
class CogVideoXFunPipelineOutput(BaseOutput):
    r"""
    Output class for CogVideo pipelines.

    Args:
        videos (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
            List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing
            denoised PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape
            `(batch_size, num_frames, channels, height, width)`.
    """

    videos: torch.Tensor


class CogVideoXFunInpaintPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-video generation using CogVideoX.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKLCogVideoX`]):
            Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
        text_encoder ([`T5EncoderModel`]):
            Frozen text-encoder. CogVideoX_Fun uses
            [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the
            [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
        tokenizer (`T5Tokenizer`):
            Tokenizer of class
            [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
        transformer ([`CogVideoXTransformer3DModel`]):
            A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
    """

    _optional_components = []
    model_cpu_offload_seq = "text_encoder->transformer->vae"

    _callback_tensor_inputs = [
        "latents",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]

    def __init__(
        self,
        tokenizer: T5Tokenizer,
        text_encoder: T5EncoderModel,
        vae: AutoencoderKLCogVideoX,
        transformer: CogVideoXTransformer3DModel,
        scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
    ):
        super().__init__()

        self.register_modules(
            tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
        )
        self.vae_scale_factor_spatial = (
            2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
        )
        self.vae_scale_factor_temporal = (
            self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4
        )

        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)

        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.mask_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=False, do_convert_grayscale=True
        )

    def _get_t5_prompt_embeds(
        self,
        prompt: Union[str, List[str]] = None,
        num_videos_per_prompt: int = 1,
        max_sequence_length: int = 226,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        batch_size = len(prompt)

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=max_sequence_length,
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because `max_sequence_length` is set to "
                f" {max_sequence_length} tokens: {removed_text}"
            )

        prompt_embeds = self.text_encoder(text_input_ids.to(device))[0]
        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

        return prompt_embeds

    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        do_classifier_free_guidance: bool = True,
        num_videos_per_prompt: int = 1,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        max_sequence_length: int = 226,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos that should be generated per prompt.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device to place the resulting embeddings on
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds = self._get_t5_prompt_embeds(
                prompt=prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds = self._get_t5_prompt_embeds(
                prompt=negative_prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        return prompt_embeds, negative_prompt_embeds

    def prepare_latents(
        self,
        batch_size,
        num_channels_latents,
        height,
        width,
        video_length,
        dtype,
        device,
        generator,
        latents=None,
        video=None,
        timestep=None,
        is_strength_max=True,
        return_noise=False,
        return_video_latents=False,
    ):
        shape = (
            batch_size,
            (video_length - 1) // self.vae_scale_factor_temporal + 1,
            num_channels_latents,
            height // self.vae_scale_factor_spatial,
            width // self.vae_scale_factor_spatial,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if return_video_latents or (latents is None and not is_strength_max):
            video = video.to(device=device, dtype=self.vae.dtype)

            bs = 1
            new_video = []
            for i in range(0, video.shape[0], bs):
                video_bs = video[i : i + bs]
                video_bs = self.vae.encode(video_bs)[0]
                video_bs = video_bs.sample()
                new_video.append(video_bs)
            video = torch.cat(new_video, dim=0)
            video = video * self.vae.config.scaling_factor

            video_latents = video.repeat(batch_size // video.shape[0], 1, 1, 1, 1)
            video_latents = video_latents.to(device=device, dtype=dtype)
            video_latents = rearrange(video_latents, "b c f h w -> b f c h w")

        if latents is None:
            noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            # if strength is 1. then initialise the latents to pure noise, else to video + noise
            latents = noise if is_strength_max else self.scheduler.add_noise(video_latents, noise, timestep)
            # if pure noise then scale the initial latents by the scheduler's init sigma
            latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
        else:
            noise = latents.to(device)
            # scale the initial noise by the standard deviation required by the scheduler
            latents = noise * self.scheduler.init_noise_sigma

        outputs = (latents,)

        if return_noise:
            outputs += (noise,)

        if return_video_latents:
            outputs += (video_latents,)

        return outputs
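
    # Semantics note (hedged): with is_strength_max=True the input video content is ignored
    # and denoising starts from pure noise; with strength < 1 the encoded video latents are
    # noised only up to `timestep`, so the result stays anchored to the input video.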

    def prepare_mask_latents(
        self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance, noise_aug_strength
    ):
        # resize the mask to latents shape as we concatenate the mask to the latents
        # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
        # and half precision

        if mask is not None:
            mask = mask.to(device=device, dtype=self.vae.dtype)
            bs = 1
            new_mask = []
            for i in range(0, mask.shape[0], bs):
                mask_bs = mask[i : i + bs]
                mask_bs = self.vae.encode(mask_bs)[0]
                mask_bs = mask_bs.mode()
                new_mask.append(mask_bs)
            mask = torch.cat(new_mask, dim=0)
            mask = mask * self.vae.config.scaling_factor

        if masked_image is not None:
            if self.transformer.config.add_noise_in_inpaint_model:
                masked_image = add_noise_to_reference_video(masked_image, ratio=noise_aug_strength)
            masked_image = masked_image.to(device=device, dtype=self.vae.dtype)
            bs = 1
            new_mask_pixel_values = []
            for i in range(0, masked_image.shape[0], bs):
                mask_pixel_values_bs = masked_image[i : i + bs]
                mask_pixel_values_bs = self.vae.encode(mask_pixel_values_bs)[0]
                mask_pixel_values_bs = mask_pixel_values_bs.mode()
                new_mask_pixel_values.append(mask_pixel_values_bs)
            masked_image_latents = torch.cat(new_mask_pixel_values, dim=0)
            masked_image_latents = masked_image_latents * self.vae.config.scaling_factor
        else:
            masked_image_latents = None

        return mask, masked_image_latents
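
    # Design note (hedged): both branches encode through the VAE one sample at a time
    # (bs = 1) to bound peak memory, and use the deterministic posterior mode() rather
    # than sample() so the conditioning latents are reproducible across calls.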

    def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
        latents = latents.permute(0, 2, 1, 3, 4)  # [batch_size, num_channels, num_frames, height, width]
        latents = 1 / self.vae.config.scaling_factor * latents

        frames = self.vae.decode(latents).sample
        frames = (frames / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        frames = frames.cpu().float().numpy()
        return frames

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        height,
        width,
        negative_prompt,
        callback_on_step_end_tensor_inputs,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    def fuse_qkv_projections(self) -> None:
        r"""Enables fused QKV projections."""
        self.fusing_transformer = True
        self.transformer.fuse_qkv_projections()

    def unfuse_qkv_projections(self) -> None:
        r"""Disable QKV projection fusion if enabled."""
        if not self.fusing_transformer:
            logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.")
        else:
            self.transformer.unfuse_qkv_projections()
            self.fusing_transformer = False

    def _prepare_rotary_positional_embeddings(
        self,
        height: int,
        width: int,
        num_frames: int,
        device: torch.device,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
        grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)

        p = self.transformer.config.patch_size
        p_t = self.transformer.config.patch_size_t

        base_size_width = self.transformer.config.sample_width // p
        base_size_height = self.transformer.config.sample_height // p

        if p_t is None:
            # CogVideoX 1.0
            grid_crops_coords = get_resize_crop_region_for_grid(
                (grid_height, grid_width), base_size_width, base_size_height
            )
            freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
                embed_dim=self.transformer.config.attention_head_dim,
                crops_coords=grid_crops_coords,
                grid_size=(grid_height, grid_width),
                temporal_size=num_frames,
            )
        else:
            # CogVideoX 1.5
            base_num_frames = (num_frames + p_t - 1) // p_t

            freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
                embed_dim=self.transformer.config.attention_head_dim,
                crops_coords=None,
                grid_size=(grid_height, grid_width),
                temporal_size=base_num_frames,
                grid_type="slice",
                max_size=(base_size_height, base_size_width),
            )

        freqs_cos = freqs_cos.to(device=device)
        freqs_sin = freqs_sin.to(device=device)
        return freqs_cos, freqs_sin
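
    # Hedged example: at 480x720 with spatial VAE factor 8 and patch_size 2, the RoPE grid
    # is 30 x 45 (= 480 / 16 x 720 / 16); for CogVideoX 1.5 the temporal axis is further
    # grouped by patch_size_t, so 13 latent frames with p_t = 2 give base_num_frames = 7.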

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def interrupt(self):
        return self._interrupt

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]

        return timesteps, num_inference_steps - t_start
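
    # Worked example (hedged): num_inference_steps = 50 and strength = 0.6 give
    # init_timestep = 30 and t_start = 20, so denoising runs only the last 30 of the
    # 50 scheduled timesteps; strength = 1.0 keeps the full schedule.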

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 480,
        width: int = 720,
        video: Union[torch.FloatTensor] = None,
        mask_video: Union[torch.FloatTensor] = None,
        masked_video_latents: Union[torch.FloatTensor] = None,
        num_frames: int = 49,
        num_inference_steps: int = 50,
        timesteps: Optional[List[int]] = None,
        guidance_scale: float = 6,
        use_dynamic_cfg: bool = False,
        num_videos_per_prompt: int = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: str = "numpy",
        return_dict: bool = False,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 226,
        strength: float = 1,
        noise_aug_strength: float = 0.0563,
        comfyui_progressbar: bool = False,
        temporal_multidiffusion_stride: int = 16,
        use_trimask: bool = False,
        zero_out_mask_region: bool = False,
        binarize_mask: bool = False,
        skip_unet: bool = False,
        use_vae_mask: bool = False,
        stack_mask: bool = False,
    ) -> Union[CogVideoXFunPipelineOutput, Tuple]:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            height (`int`, *optional*, defaults to 480):
                The height in pixels of the generated video.
            width (`int`, *optional*, defaults to 720):
                The width in pixels of the generated video.
            num_frames (`int`, defaults to `49`):
                Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. Generated video will
                contain 1 extra frame because CogVideoX_Fun is conditioned with (num_seconds * fps + 1) frames where
                num_seconds is 6 and fps is 4. However, since videos can be saved at any fps, the only condition that
                needs to be satisfied is that of divisibility mentioned above.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 6):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"numpy"`):
                The output format of the generated video. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `False`):
                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
                of a plain tuple.
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, defaults to `226`):
                Maximum sequence length in encoded prompt. Must be consistent with
                `self.transformer.config.max_text_seq_length` otherwise may lead to poor results.

        Examples:

        Returns:
            [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoXFunPipelineOutput`] or `tuple`:
            [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoXFunPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated videos.
        """

        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial
        width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial
        num_frames = num_frames or self.transformer.config.sample_frames

        num_videos_per_prompt = 1

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            callback_on_step_end_tensor_inputs,
            prompt_embeds,
            negative_prompt_embeds,
        )
        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._interrupt = False

        # 2. Default call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        logger.info(f'Use cfg: {do_classifier_free_guidance}, guidance_scale={guidance_scale}')

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            negative_prompt,
            do_classifier_free_guidance,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_sequence_length=max_sequence_length,
            device=device,
        )
        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)

        # 4. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(
            num_inference_steps=num_inference_steps, strength=strength, device=device
        )
        self._num_timesteps = len(timesteps)
        if comfyui_progressbar:
            from comfy.utils import ProgressBar
            pbar = ProgressBar(num_inference_steps + 2)
        # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
        latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt)
        # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
        is_strength_max = strength == 1.0

        # 5. Prepare latents.
        if video is not None:
            video_length = video.shape[2]
            init_video = self.image_processor.preprocess(rearrange(video, "b c f h w -> (b f) c h w"), height=height, width=width)
            init_video = init_video.to(dtype=torch.float32)
            init_video = rearrange(init_video, "(b f) c h w -> b c f h w", f=video_length)
        else:
            video_length = num_frames
            init_video = None

        # Magvae needs the number of frames to be 4n + 1.
        local_latent_length = (num_frames - 1) // self.vae_scale_factor_temporal + 1
        # For CogVideoX 1.5, the latent frames should be clipped to make it divisible by patch_size_t
        patch_size_t = self.transformer.config.patch_size_t
        additional_frames = 0
        if patch_size_t is not None and local_latent_length % patch_size_t != 0:
            additional_frames = local_latent_length % patch_size_t
            num_frames -= additional_frames * self.vae_scale_factor_temporal
        if num_frames <= 0:
            num_frames = 1
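
        # Arithmetic check (hedged example): num_frames = 49 with a temporal VAE factor of 4
        # gives 13 latent frames; with patch_size_t = 2 (CogVideoX 1.5), 13 % 2 = 1, so one
        # latent frame (4 pixel frames) is dropped: num_frames becomes 45 -> 12 latent frames.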

        num_channels_latents = self.vae.config.latent_channels
        num_channels_transformer = self.transformer.config.in_channels
        return_image_latents = num_channels_transformer == num_channels_latents

        latents_outputs = self.prepare_latents(
            batch_size * num_videos_per_prompt,
            num_channels_latents,
            height,
            width,
            video_length,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
            video=init_video,
            timestep=latent_timestep,
            is_strength_max=is_strength_max,
            return_noise=True,
            return_video_latents=return_image_latents,
        )
        if return_image_latents:
            latents, noise, image_latents = latents_outputs
        else:
            latents, noise = latents_outputs
        if comfyui_progressbar:
            pbar.update(1)

        if mask_video is not None:
            if (mask_video == 255).all():
                mask_latents = torch.zeros_like(latents)[:, :, :1].to(latents.device, latents.dtype)
                masked_video_latents = torch.zeros_like(latents).to(latents.device, latents.dtype)

                mask_input = torch.cat([mask_latents] * 2) if do_classifier_free_guidance else mask_latents
                masked_video_latents_input = (
                    torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
                )
                inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=2).to(latents.dtype)
            else:
                # Prepare mask latent variables
                video_length = video.shape[2]
                mask_condition = self.mask_processor.preprocess(rearrange(mask_video, "b c f h w -> (b f) c h w"), height=height, width=width)
                if use_trimask:
                    mask_condition = torch.where(mask_condition > 0.75, 1., mask_condition)
                    mask_condition = torch.where((mask_condition <= 0.75) * (mask_condition >= 0.25), 127. / 255., mask_condition)
                    mask_condition = torch.where(mask_condition < 0.25, 0., mask_condition)
                else:
                    mask_condition = torch.where(mask_condition > 0.5, 1., 0.)

                mask_condition = mask_condition.to(dtype=torch.float32)
                mask_condition = rearrange(mask_condition, "(b f) c h w -> b c f h w", f=video_length)

                if num_channels_transformer != num_channels_latents:
                    mask_condition_tile = torch.tile(mask_condition, [1, 3, 1, 1, 1])
                    if masked_video_latents is None:
                        if zero_out_mask_region:
                            masked_video = init_video * (mask_condition_tile < 0.75) + torch.ones_like(init_video) * (mask_condition_tile > 0.75) * -1
                        else:
                            masked_video = init_video
                    else:
                        masked_video = masked_video_latents

                    mask_encoded, masked_video_latents = self.prepare_mask_latents(
                        1 - mask_condition_tile if use_vae_mask else None,
                        masked_video,
                        batch_size,
                        height,
                        width,
                        prompt_embeds.dtype,
                        device,
                        generator,
                        do_classifier_free_guidance,
                        noise_aug_strength=noise_aug_strength,
                    )
                    if not use_vae_mask and not stack_mask:
                        mask_latents = resize_mask(1 - mask_condition, masked_video_latents)
                        if binarize_mask:
                            if use_trimask:
                                mask_latents = torch.where(mask_latents > 0.75, 1., mask_latents)
                                mask_latents = torch.where((mask_latents <= 0.75) * (mask_latents >= 0.25), 0.5, mask_latents)
                                mask_latents = torch.where(mask_latents < 0.25, 0., mask_latents)
                            else:
                                mask_latents = torch.where(mask_latents < 0.9, 0., 1.).to(mask_latents.dtype)

                        mask_latents = mask_latents.to(masked_video_latents.device) * self.vae.config.scaling_factor

                        mask = torch.tile(mask_condition, [1, num_channels_latents, 1, 1, 1])
                        mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)

                        mask_input = torch.cat([mask_latents] * 2) if do_classifier_free_guidance else mask_latents
                        mask = rearrange(mask, "b c f h w -> b f c h w")
                    elif stack_mask:
                        mask_latents = torch.cat([
                            torch.repeat_interleave(mask_condition[:, :, 0:1], repeats=4, dim=2),
                            mask_condition[:, :, 1:],
                        ], dim=2)
                        mask_latents = mask_latents.view(
                            mask_latents.shape[0],
                            mask_latents.shape[2] // 4,
                            4,
                            mask_latents.shape[3],
                            mask_latents.shape[4],
                        )
                        mask_latents = mask_latents.transpose(1, 2)
                        mask_latents = resize_mask(1 - mask_latents, masked_video_latents).to(latents.device, latents.dtype)
                        mask_input = torch.cat([mask_latents] * 2) if do_classifier_free_guidance else mask_latents
                    else:
                        mask_input = (
                            torch.cat([mask_encoded] * 2) if do_classifier_free_guidance else mask_encoded
                        )

                    masked_video_latents_input = (
                        torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
                    )

                    mask_input = rearrange(mask_input, "b c f h w -> b f c h w")
                    masked_video_latents_input = rearrange(masked_video_latents_input, "b c f h w -> b f c h w")

                    # concat(binary mask, encode(mask * video))
                    inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=2).to(latents.dtype)
                else:
                    mask = torch.tile(mask_condition, [1, num_channels_latents, 1, 1, 1])
                    mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)
                    mask = rearrange(mask, "b c f h w -> b f c h w")

                    inpaint_latents = None
        else:
            if num_channels_transformer != num_channels_latents:
                mask = torch.zeros_like(latents).to(latents.device, latents.dtype)
                masked_video_latents = torch.zeros_like(latents).to(latents.device, latents.dtype)

                mask_input = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
                masked_video_latents_input = (
                    torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
                )
                # concatenate along the channel axis (dim=2), consistent with the mask_video branch above
                inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=2).to(latents.dtype)
            else:
                mask = torch.zeros_like(init_video[:, :1])
                mask = torch.tile(mask, [1, num_channels_latents, 1, 1, 1])
                mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)
                mask = rearrange(mask, "b c f h w -> b f c h w")

                inpaint_latents = None
        if comfyui_progressbar:
            pbar.update(1)
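
        # Layout note (hedged): inpaint_latents stacks [mask, VAE(masked video)] along the
        # channel axis in (b, f, c, h, w) order, so the transformer's in_channels must cover
        # the noisy latent channels plus the mask and masked-video conditioning channels.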

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
        # mask_condition only exists when a non-trivial mask_video was provided
        if mask_video is not None and not (mask_video == 255).all():
            logger.debug(f'Pipeline mask {mask_condition.shape} {mask_condition.dtype} {mask_condition.min()} {mask_condition.max()}')
        # 8. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        latent_temporal_window_size = (num_frames - 1) // 4 + 1
        if latents.size(1) > latent_temporal_window_size:
            logger.info(f'Adopt temporal multidiffusion for the latents {latents.shape} {latents.dtype}')

        # VAE experiment
        if skip_unet:
            masked_video_latents = rearrange(masked_video_latents, "b c f h w -> b f c h w")
            if output_type == "numpy":
                video = self.decode_latents(masked_video_latents)
            elif not output_type == "latent":
                video = self.decode_latents(masked_video_latents)
                video = self.video_processor.postprocess_video(video=video, output_type=output_type)
            else:
                video = masked_video_latents

            # Offload all models
            self.maybe_free_model_hooks()

            if not return_dict:
                video = torch.from_numpy(video)

            return CogVideoXFunPipelineOutput(videos=video)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            # for DPM-solver++
            old_pred_original_sample = None
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                def _sample(_latents, _inpaint_latents):
                    # 7. Create rotary embeds if required
                    image_rotary_emb = (
                        self._prepare_rotary_positional_embeddings(height, width, _latents.size(1), device)
                        if self.transformer.config.use_rotary_positional_embeddings
                        else None
                    )

                    latent_model_input = torch.cat([_latents] * 2) if do_classifier_free_guidance else _latents
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                    # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                    timestep = t.expand(latent_model_input.shape[0])

                    # predict noise model_output
                    noise_pred = self.transformer(
                        hidden_states=latent_model_input,
                        encoder_hidden_states=prompt_embeds,
                        timestep=timestep,
                        image_rotary_emb=image_rotary_emb,
                        return_dict=False,
                        inpaint_latents=_inpaint_latents,
                    )[0]
                    noise_pred = noise_pred.float()

                    # perform guidance
                    if use_dynamic_cfg:
                        self._guidance_scale = 1 + guidance_scale * (
                            (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
                        )
                    if do_classifier_free_guidance:
                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                        noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

                    # compute the previous noisy sample x_t -> x_t-1
                    if not isinstance(self.scheduler, CogVideoXDPMScheduler):
                        _latents = self.scheduler.step(noise_pred, t, _latents, **extra_step_kwargs, return_dict=False)[0]
                    else:
                        _latents, old_pred_original_sample = self.scheduler.step(
                            noise_pred,
                            old_pred_original_sample,
                            t,
                            timesteps[i - 1] if i > 0 else None,
                            _latents,
                            **extra_step_kwargs,
                            return_dict=False,
                        )
                    _latents = _latents.to(prompt_embeds.dtype)
                    return _latents

                if latents.size(1) <= latent_temporal_window_size:
                    latents = _sample(latents, inpaint_latents)
                else:
                    # adopt temporal multidiffusion
                    latents_canvas = torch.zeros_like(latents).float()
                    weights_canvas = torch.zeros(1, latents.size(1), 1, 1, 1).to(latents.device).float()
                    temporal_stride = temporal_multidiffusion_stride // 4
                    assert latent_temporal_window_size > temporal_stride

                    time_beg = 0
                    while time_beg < latents.size(1):
                        time_end = min(time_beg + latent_temporal_window_size, latents.size(1))

                        latents_i = latents[:, time_beg:time_end]
                        if inpaint_latents is not None:
                            inpaint_latents_i = inpaint_latents[:, time_beg:time_end]
                        else:
                            inpaint_latents_i = None

                        latents_i = _sample(latents_i, inpaint_latents_i)

                        weights_i = torch.ones(1, time_end - time_beg, 1, 1, 1).to(latents.device).to(latents.dtype)
                        if time_beg > 0 and temporal_stride > 0:
                            weights_i[:, :temporal_stride] = (torch.linspace(0., 1., temporal_stride + 2)[1:-1]
                                                              .to(latents.device)
                                                              .to(latents.dtype)
                                                              .reshape(1, temporal_stride, 1, 1, 1))
                        if time_end < latents.size(1) and temporal_stride > 0:
                            weights_i[:, -temporal_stride:] = (torch.linspace(1., 0., temporal_stride + 2)[1:-1]
                                                               .to(latents.device)
                                                               .to(latents.dtype)
                                                               .reshape(1, temporal_stride, 1, 1, 1))

                        latents_canvas[:, time_beg:time_end] += latents_i * weights_i
|
| 1207 |
-
weights_canvas[:, time_beg:time_end] += weights_i
|
| 1208 |
-
|
| 1209 |
-
time_beg = time_end - temporal_stride
|
| 1210 |
-
if time_end >= latents.size(1):
|
| 1211 |
-
break
|
| 1212 |
-
latents = (latents_canvas / weights_canvas).to(latents.dtype)
|
| 1213 |
-
|
| 1214 |
-
# call the callback, if provided
|
| 1215 |
-
if callback_on_step_end is not None:
|
| 1216 |
-
callback_kwargs = {}
|
| 1217 |
-
for k in callback_on_step_end_tensor_inputs:
|
| 1218 |
-
callback_kwargs[k] = locals()[k]
|
| 1219 |
-
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1220 |
-
|
| 1221 |
-
latents = callback_outputs.pop("latents", latents)
|
| 1222 |
-
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1223 |
-
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 1224 |
-
|
| 1225 |
-
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1226 |
-
progress_bar.update()
|
| 1227 |
-
if comfyui_progressbar:
|
| 1228 |
-
pbar.update(1)
|
| 1229 |
-
|
| 1230 |
-
if output_type == "numpy":
|
| 1231 |
-
video = self.decode_latents(latents)
|
| 1232 |
-
elif not output_type == "latent":
|
| 1233 |
-
video = self.decode_latents(latents)
|
| 1234 |
-
video = self.video_processor.postprocess_video(video=video, output_type=output_type)
|
| 1235 |
-
else:
|
| 1236 |
-
video = latents
|
| 1237 |
-
|
| 1238 |
-
# Offload all models
|
| 1239 |
-
self.maybe_free_model_hooks()
|
| 1240 |
-
|
| 1241 |
-
if not return_dict:
|
| 1242 |
-
video = torch.from_numpy(video)
|
| 1243 |
-
|
| 1244 |
-
return CogVideoXFunPipelineOutput(videos=video)
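
A note on `latent_temporal_window_size = (num_frames - 1) // 4 + 1` in the denoising setup above: the hard-coded 4 appears to match the CogVideoX VAE's temporal compression, which encodes the first pixel frame on its own and each subsequent group of four frames into one latent frame. A minimal sketch assuming that factor (the helper name is illustrative, not part of the pipeline):

def latent_window_size(num_frames: int, temporal_compression: int = 4) -> int:
    # One latent frame for the first pixel frame, then one per
    # `temporal_compression` following pixel frames.
    return (num_frames - 1) // temporal_compression + 1

for n in (9, 49, 85):
    print(n, "pixel frames ->", latent_window_size(n), "latent frames")
# 9 -> 3, 49 -> 13, 85 -> 22

Temporal multidiffusion only kicks in when the latent frame count exceeds this window, i.e. when the requested video is longer than what the transformer denoises natively.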
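
The classifier-free guidance arithmetic inside `_sample` can be checked in isolation. In the sketch below (tensor shape and scale are illustrative), the batch stacks the unconditional and text-conditional predictions along dim 0, `chunk(2)` splits them apart, and the guided prediction extrapolates from the unconditional output toward the text-conditional one; with `use_dynamic_cfg`, the pipeline additionally re-derives the scale each step from a cosine schedule before this combination.

import torch

guidance_scale = 6.0                         # illustrative value
noise_pred = torch.randn(2, 13, 16, 60, 90)  # (2 * batch, frames, channels, height, width)

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 13, 16, 60, 90])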
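
The temporal multidiffusion branch is the most involved part of the loop: windows of `latent_temporal_window_size` frames are denoised independently, consecutive windows overlap by `temporal_stride` frames, the overlaps are cross-faded with linear ramps, and the accumulated canvas is normalized by the accumulated weights. A toy 1-D version (illustrative sizes, with a constant stand-in for the denoised window) shows that the fade-in of one window and the fade-out of its predecessor are complementary, so the accumulated weights come out uniform here:

import torch

length, window, stride = 10, 4, 2  # illustrative; the pipeline asserts window > stride

canvas = torch.zeros(length)
weights = torch.zeros(length)

time_beg = 0
while time_beg < length:
    time_end = min(time_beg + window, length)
    chunk = torch.ones(time_end - time_beg)  # stand-in for a denoised window

    w = torch.ones(time_end - time_beg)
    if time_beg > 0:
        w[:stride] = torch.linspace(0., 1., stride + 2)[1:-1]   # fade in
    if time_end < length:
        w[-stride:] = torch.linspace(1., 0., stride + 2)[1:-1]  # fade out

    canvas[time_beg:time_end] += chunk * w
    weights[time_beg:time_end] += w

    time_beg = time_end - stride
    if time_end >= length:
        break

print(weights)           # all ones: overlapping ramps are complementary
print(canvas / weights)  # normalization recovers the constant input

In the pipeline the same ramps run along the frame axis of the 5-D latents, and `latents = (latents_canvas / weights_canvas).to(latents.dtype)` performs the final blend.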