| """SD3 LoRA fine-tuning script for text2image generation.""" |
|
|
| import argparse |
| import copy |
| import json |
| import logging |
| import math |
| import os |
| import random |
| import shutil |
| from contextlib import nullcontext |
| from pathlib import Path |
|
|
| import datasets |
| import numpy as np |
| import torch |
| import torch.nn.functional as F |
| import torch.utils.checkpoint |
| import transformers |
| from accelerate import Accelerator |
| from accelerate.logging import get_logger |
| from accelerate.utils import DistributedDataParallelKwargs, DistributedType, ProjectConfiguration, set_seed |
| from datasets import load_dataset |
| from huggingface_hub import create_repo, upload_folder |
| from packaging import version |
| from peft import LoraConfig, set_peft_model_state_dict |
| from peft.utils import get_peft_model_state_dict |
| from PIL import Image |
| from torchvision import transforms |
| from torchvision.transforms.functional import crop |
| from tqdm.auto import tqdm |
| from transformers import CLIPTokenizer, PretrainedConfig, T5TokenizerFast |
|
|
| import diffusers |
| from diffusers import ( |
| AutoencoderKL, |
| FlowMatchEulerDiscreteScheduler, |
| SD3Transformer2DModel, |
| StableDiffusion3Pipeline, |
| ) |
| from diffusers.optimization import get_scheduler |
| from diffusers.training_utils import ( |
| _set_state_dict_into_text_encoder, |
| cast_training_params, |
| compute_density_for_timestep_sampling, |
| compute_loss_weighting_for_sd3, |
| free_memory, |
| ) |
| from diffusers.utils import ( |
| check_min_version, |
| convert_unet_state_dict_to_peft, |
| is_wandb_available, |
| ) |
| from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card |
| from diffusers.utils.torch_utils import is_compiled_module |
|
|
| if is_wandb_available(): |
| import wandb |
|
|
| |
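| # Will raise an error if the installed diffusers version is older than the minimum this script targets. |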
| check_min_version("0.30.0") |
|
|
| logger = get_logger(__name__) |
|
|
|
|
| def save_model_card( |
| repo_id: str, |
| images: list = None, |
| base_model: str = None, |
| dataset_name: str = None, |
| train_text_encoder: bool = False, |
| repo_folder: str = None, |
| vae_path: str = None, |
| ): |
| """Save model card for SD3 LoRA model.""" |
| img_str = "" |
| if images is not None: |
| for i, image in enumerate(images): |
| image.save(os.path.join(repo_folder, f"image_{i}.png")) |
| img_str += f"![img_{i}](./image_{i}.png)\n" |
|
|
| model_description = f""" |
| # SD3 LoRA text2image fine-tuning - {repo_id} |
| |
| These are LoRA adaptation weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images below.\n |
| {img_str} |
| |
| LoRA for the text encoder was enabled: {train_text_encoder}. |
| |
| Special VAE used for training: {vae_path}. |
| """ |
| model_card = load_or_create_model_card( |
| repo_id_or_path=repo_id, |
| from_training=True, |
| license="other", |
| base_model=base_model, |
| model_description=model_description, |
| inference=True, |
| ) |
|
|
| tags = [ |
| "stable-diffusion-3", |
| "stable-diffusion-3-diffusers", |
| "text-to-image", |
| "diffusers", |
| "diffusers-training", |
| "lora", |
| "sd3", |
| ] |
| model_card = populate_model_card(model_card, tags=tags) |
| model_card.save(os.path.join(repo_folder, "README.md")) |
|
|
|
|
| def log_validation( |
| pipeline, |
| args, |
| accelerator, |
| epoch, |
| is_final_validation=False, |
| global_step=None, |
| ): |
| """Run validation and log images.""" |
| logger.info( |
| f"Running validation... \n Generating {args.num_validation_images} images with prompt:" |
| f" {args.validation_prompt}." |
| ) |
| pipeline = pipeline.to(accelerator.device) |
| pipeline.set_progress_bar_config(disable=True) |
|
|
| |
| generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None |
| pipeline_args = {"prompt": args.validation_prompt} |
| |
| if torch.backends.mps.is_available(): |
| autocast_ctx = nullcontext() |
| else: |
| autocast_ctx = torch.autocast(accelerator.device.type) |
|
|
| with autocast_ctx: |
| images = [pipeline(**pipeline_args, generator=generator).images[0] for _ in range(args.num_validation_images)] |
|
|
| |
| if accelerator.is_main_process: |
| validation_dir = os.path.join(args.output_dir, "validation_images") |
| os.makedirs(validation_dir, exist_ok=True) |
| for i, image in enumerate(images): |
| |
| if global_step is not None: |
| filename = f"validation_step_{global_step}_epoch_{epoch}_img_{i}.png" |
| else: |
| filename = f"validation_epoch_{epoch}_img_{i}.png" |
| |
| image_path = os.path.join(validation_dir, filename) |
| image.save(image_path) |
| logger.info(f"Saved validation image: {image_path}") |
|
|
| for tracker in (accelerator.trackers if hasattr(accelerator, 'trackers') and accelerator.trackers else []): |
| phase_name = "test" if is_final_validation else "validation" |
| try: |
| if tracker.name == "tensorboard": |
| np_images = np.stack([np.asarray(img) for img in images]) |
| tracker.writer.add_images(phase_name, np_images, epoch, dataformats="NHWC") |
| if tracker.name == "wandb": |
| tracker.log( |
| { |
| phase_name: [ |
| wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) |
| ] |
| } |
| ) |
| except Exception as e: |
| logger.warning(f"Failed to log to {tracker.name}: {e}") |
| |
| del pipeline |
| free_memory() |
| return images |
|
|
|
|
| def import_model_class_from_model_name_or_path( |
| pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" |
| ): |
| """Import the correct text encoder class.""" |
| text_encoder_config = PretrainedConfig.from_pretrained( |
| pretrained_model_name_or_path, subfolder=subfolder, revision=revision |
| ) |
| model_class = text_encoder_config.architectures[0] |
|
|
| if model_class == "CLIPTextModelWithProjection": |
| from transformers import CLIPTextModelWithProjection |
| return CLIPTextModelWithProjection |
| elif model_class == "T5EncoderModel": |
| from transformers import T5EncoderModel |
| return T5EncoderModel |
| else: |
| raise ValueError(f"{model_class} is not supported.") |
|
|
|
|
| def parse_args(input_args=None): |
| """Parse command line arguments.""" |
| parser = argparse.ArgumentParser(description="SD3 LoRA training script.") |
| |
| |
| parser.add_argument( |
| "--pretrained_model_name_or_path", |
| type=str, |
| default=None, |
| required=True, |
| help="Path to pretrained model or model identifier from huggingface.co/models.", |
| ) |
| parser.add_argument( |
| "--revision", |
| type=str, |
| default=None, |
| help="Revision of pretrained model identifier from huggingface.co/models.", |
| ) |
| parser.add_argument( |
| "--variant", |
| type=str, |
| default=None, |
| help="Variant of the model files, e.g. fp16", |
| ) |
| |
| |
| parser.add_argument( |
| "--dataset_name", |
| type=str, |
| default=None, |
| help="The name of the Dataset to train on.", |
| ) |
| parser.add_argument( |
| "--dataset_config_name", |
| type=str, |
| default=None, |
| help="The config of the Dataset.", |
| ) |
| parser.add_argument( |
| "--train_data_dir", |
| type=str, |
| default=None, |
| help="A folder containing the training data.", |
| ) |
| parser.add_argument( |
| "--image_column", |
| type=str, |
| default="image", |
| help="The column of the dataset containing an image." |
| ) |
| parser.add_argument( |
| "--caption_column", |
| type=str, |
| default="caption", |
| help="The column of the dataset containing a caption.", |
| ) |
| |
| |
| parser.add_argument( |
| "--max_sequence_length", |
| type=int, |
| default=77, |
| help="Maximum sequence length to use with the T5 text encoder", |
| ) |
| parser.add_argument( |
| "--validation_prompt", |
| type=str, |
| default=None, |
| help="A prompt used during validation.", |
| ) |
| parser.add_argument( |
| "--num_validation_images", |
| type=int, |
| default=4, |
| help="Number of images for validation.", |
| ) |
| parser.add_argument( |
| "--validation_epochs", |
| type=int, |
| default=1, |
| help="Run validation every X epochs.", |
| ) |
| parser.add_argument( |
| "--max_train_samples", |
| type=int, |
| default=None, |
| help="Truncate the number of training examples.", |
| ) |
| parser.add_argument( |
| "--output_dir", |
| type=str, |
| default="sd3-lora-finetuned", |
| help="Output directory for model predictions and checkpoints.", |
| ) |
| parser.add_argument( |
| "--cache_dir", |
| type=str, |
| default=None, |
| help="Directory to store downloaded models and datasets.", |
| ) |
| parser.add_argument( |
| "--seed", |
| type=int, |
| default=None, |
| help="A seed for reproducible training." |
| ) |
| parser.add_argument( |
| "--resolution", |
| type=int, |
| default=1024, |
| help="Image resolution for training.", |
| ) |
| parser.add_argument( |
| "--center_crop", |
| default=False, |
| action="store_true", |
| help="Whether to center crop input images.", |
| ) |
| parser.add_argument( |
| "--random_flip", |
| action="store_true", |
| help="Whether to randomly flip images horizontally.", |
| ) |
| parser.add_argument( |
| "--train_text_encoder", |
| action="store_true", |
| help="Whether to train the text encoder.", |
| ) |
| parser.add_argument( |
| "--train_batch_size", |
| type=int, |
| default=16, |
| help="Batch size for training dataloader." |
| ) |
| parser.add_argument( |
| "--num_train_epochs", |
| type=int, |
| default=100, |
| help="Total number of training epochs.", |
| ) |
| parser.add_argument( |
| "--max_train_steps", |
| type=int, |
| default=None, |
| help="Total number of training steps.", |
| ) |
| parser.add_argument( |
| "--checkpointing_steps", |
| type=int, |
| default=500, |
| help="Save checkpoint every X updates.", |
| ) |
| parser.add_argument( |
| "--checkpoints_total_limit", |
| type=int, |
| default=None, |
| help="Max number of checkpoints to store.", |
| ) |
| parser.add_argument( |
| "--resume_from_checkpoint", |
| type=str, |
| default=None, |
| help="Path to resume training from checkpoint.", |
| ) |
| parser.add_argument( |
| "--gradient_accumulation_steps", |
| type=int, |
| default=1, |
| help="Number of update steps to accumulate.", |
| ) |
| parser.add_argument( |
| "--gradient_checkpointing", |
| action="store_true", |
| help="Use gradient checkpointing to save memory.", |
| ) |
| parser.add_argument( |
| "--learning_rate", |
| type=float, |
| default=1e-4, |
| help="Initial learning rate.", |
| ) |
| parser.add_argument( |
| "--scale_lr", |
| action="store_true", |
| default=False, |
| help="Scale learning rate by number of GPUs, etc.", |
| ) |
| parser.add_argument( |
| "--lr_scheduler", |
| type=str, |
| default="constant", |
| help="Learning rate scheduler type.", |
| ) |
| parser.add_argument( |
| "--lr_warmup_steps", |
| type=int, |
| default=500, |
| help="Number of warmup steps." |
| ) |
| |
| |
| parser.add_argument( |
| "--weighting_scheme", |
| type=str, |
| default="logit_normal", |
| choices=["sigma_sqrt", "logit_normal", "mode", "cosmap"], |
| help="Weighting scheme for flow matching loss.", |
| ) |
| parser.add_argument( |
| "--logit_mean", |
| type=float, |
| default=0.0, |
| help="Mean for logit_normal weighting." |
| ) |
| parser.add_argument( |
| "--logit_std", |
| type=float, |
| default=1.0, |
| help="Std for logit_normal weighting." |
| ) |
| parser.add_argument( |
| "--mode_scale", |
| type=float, |
| default=1.29, |
| help="Scale for mode weighting scheme.", |
| ) |
| parser.add_argument( |
| "--precondition_outputs", |
| type=int, |
| default=1, |
| help="Whether to precondition model outputs.", |
| ) |
| |
| |
| parser.add_argument( |
| "--allow_tf32", |
| action="store_true", |
| help="Allow TF32 on Ampere GPUs.", |
| ) |
| parser.add_argument( |
| "--dataloader_num_workers", |
| type=int, |
| default=0, |
| help="Number of data loading workers.", |
| ) |
| parser.add_argument( |
| "--use_8bit_adam", |
| action="store_true", |
| help="Use 8-bit Adam optimizer." |
| ) |
| parser.add_argument( |
| "--adam_beta1", |
| type=float, |
| default=0.9, |
| help="Beta1 for Adam optimizer." |
| ) |
| parser.add_argument( |
| "--adam_beta2", |
| type=float, |
| default=0.999, |
| help="Beta2 for Adam optimizer." |
| ) |
| parser.add_argument( |
| "--adam_weight_decay", |
| type=float, |
| default=1e-2, |
| help="Weight decay for Adam." |
| ) |
| parser.add_argument( |
| "--adam_epsilon", |
| type=float, |
| default=1e-08, |
| help="Epsilon for Adam optimizer." |
| ) |
| parser.add_argument( |
| "--max_grad_norm", |
| default=1.0, |
| type=float, |
| help="Max gradient norm." |
| ) |
| |
| |
| parser.add_argument( |
| "--push_to_hub", |
| action="store_true", |
| help="Push model to the Hub." |
| ) |
| parser.add_argument( |
| "--hub_token", |
| type=str, |
| default=None, |
| help="Token for Model Hub." |
| ) |
| parser.add_argument( |
| "--hub_model_id", |
| type=str, |
| default=None, |
| help="Repository name for the Hub.", |
| ) |
| parser.add_argument( |
| "--logging_dir", |
| type=str, |
| default="logs", |
| help="TensorBoard log directory.", |
| ) |
| parser.add_argument( |
| "--report_to", |
| type=str, |
| default="tensorboard", |
| help="Logging integration to use.", |
| ) |
| parser.add_argument( |
| "--mixed_precision", |
| type=str, |
| default=None, |
| choices=["no", "fp16", "bf16"], |
| help="Mixed precision type.", |
| ) |
| parser.add_argument( |
| "--local_rank", |
| type=int, |
| default=-1, |
| help="Local rank for distributed training." |
| ) |
| |
| |
| parser.add_argument( |
| "--rank", |
| type=int, |
| default=64, |
| help="LoRA rank dimension.", |
| ) |
| |
| if input_args is not None: |
| args = parser.parse_args(input_args) |
| else: |
| args = parser.parse_args() |
|
|
| env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) |
| if env_local_rank != -1 and env_local_rank != args.local_rank: |
| args.local_rank = env_local_rank |
|
|
| |
| if args.dataset_name is None and args.train_data_dir is None: |
| raise ValueError("Need either a dataset name or a training folder.") |
|
|
| return args |
|
|
|
|
| DATASET_NAME_MAPPING = { |
| "lambdalabs/naruto-blip-captions": ("image", "text"), |
| } |
|
|
|
|
| def tokenize_prompt(tokenizer, prompt): |
| """Tokenize prompt using the given tokenizer.""" |
| text_inputs = tokenizer( |
| prompt, |
| padding="max_length", |
| max_length=77, |
| truncation=True, |
| return_tensors="pt", |
| ) |
| return text_inputs.input_ids |
|
|
|
|
| def _encode_prompt_with_t5( |
| text_encoder, |
| tokenizer, |
| max_sequence_length, |
| prompt=None, |
| num_images_per_prompt=1, |
| device=None, |
| text_input_ids=None, |
| ): |
| """Encode prompt using T5 text encoder.""" |
| if prompt is not None: |
| prompt = [prompt] if isinstance(prompt, str) else prompt |
| batch_size = len(prompt) |
| else: |
| |
| if text_input_ids is None: |
| raise ValueError("Either prompt or text_input_ids must be provided") |
| batch_size = text_input_ids.shape[0] |
|
|
| if tokenizer is not None and prompt is not None: |
| text_inputs = tokenizer( |
| prompt, |
| padding="max_length", |
| max_length=max_sequence_length, |
| truncation=True, |
| add_special_tokens=True, |
| return_tensors="pt", |
| ) |
| text_input_ids = text_inputs.input_ids |
| else: |
| if text_input_ids is None: |
| raise ValueError("text_input_ids must be provided when tokenizer is not specified or prompt is None") |
|
|
| prompt_embeds = text_encoder(text_input_ids.to(device))[0] |
| dtype = text_encoder.dtype |
| prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) |
|
|
| _, seq_len, _ = prompt_embeds.shape |
| |
| prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) |
| prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) |
|
|
| return prompt_embeds |
|
|
|
|
| def _encode_prompt_with_clip( |
| text_encoder, |
| tokenizer, |
| prompt: str, |
| device=None, |
| text_input_ids=None, |
| num_images_per_prompt: int = 1, |
| ): |
| """Encode prompt using CLIP text encoder.""" |
| if prompt is not None: |
| prompt = [prompt] if isinstance(prompt, str) else prompt |
| batch_size = len(prompt) |
| else: |
| |
| if text_input_ids is None: |
| raise ValueError("Either prompt or text_input_ids must be provided") |
| batch_size = text_input_ids.shape[0] |
|
|
| if tokenizer is not None and prompt is not None: |
| text_inputs = tokenizer( |
| prompt, |
| padding="max_length", |
| max_length=77, |
| truncation=True, |
| return_tensors="pt", |
| ) |
| text_input_ids = text_inputs.input_ids |
| else: |
| if text_input_ids is None: |
| raise ValueError("text_input_ids must be provided when tokenizer is not specified or prompt is None") |
|
|
| prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) |
| pooled_prompt_embeds = prompt_embeds[0] |
| prompt_embeds = prompt_embeds.hidden_states[-2] |
| prompt_embeds = prompt_embeds.to(dtype=text_encoder.dtype, device=device) |
|
|
| _, seq_len, _ = prompt_embeds.shape |
| |
| prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) |
| prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) |
|
|
| return prompt_embeds, pooled_prompt_embeds |
|
|
|
|
| def encode_prompt( |
| text_encoders, |
| tokenizers, |
| prompt: str, |
| max_sequence_length, |
| device=None, |
| num_images_per_prompt: int = 1, |
| text_input_ids_list=None, |
| ): |
| """Encode prompt using all three text encoders (SD3 architecture).""" |
| if prompt is not None: |
| prompt = [prompt] if isinstance(prompt, str) else prompt |
|
|
| |
| clip_tokenizers = tokenizers[:2] |
| clip_text_encoders = text_encoders[:2] |
|
|
| clip_prompt_embeds_list = [] |
| clip_pooled_prompt_embeds_list = [] |
| |
| for i, (tokenizer, text_encoder) in enumerate(zip(clip_tokenizers, clip_text_encoders)): |
| prompt_embeds, pooled_prompt_embeds = _encode_prompt_with_clip( |
| text_encoder=text_encoder, |
| tokenizer=tokenizer, |
| prompt=prompt, |
| device=device if device is not None else text_encoder.device, |
| num_images_per_prompt=num_images_per_prompt, |
| text_input_ids=text_input_ids_list[i] if text_input_ids_list else None, |
| ) |
| clip_prompt_embeds_list.append(prompt_embeds) |
| clip_pooled_prompt_embeds_list.append(pooled_prompt_embeds) |
|
|
| |
| clip_prompt_embeds = torch.cat(clip_prompt_embeds_list, dim=-1) |
| pooled_prompt_embeds = torch.cat(clip_pooled_prompt_embeds_list, dim=-1) |
|
|
| |
| t5_prompt_embed = _encode_prompt_with_t5( |
| text_encoders[-1], |
| tokenizers[-1], |
| max_sequence_length, |
| prompt=prompt, |
| num_images_per_prompt=num_images_per_prompt, |
| text_input_ids=text_input_ids_list[-1] if text_input_ids_list else None, |
| device=device if device is not None else text_encoders[-1].device, |
| ) |
|
|
| |
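| # Zero-pad the concatenated CLIP embeddings up to the T5 hidden size, then stack the CLIP and T5 |
| # token embeddings along the sequence axis to form the joint SD3 text conditioning. |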
| clip_prompt_embeds = torch.nn.functional.pad( |
| clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1]) |
| ) |
| |
| prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) |
|
|
| return prompt_embeds, pooled_prompt_embeds |
|
|
|
|
| def load_dataset_from_jsonl(metadata_path, data_dir, accelerator=None): |
| """ |
| Load the dataset from a metadata.jsonl file instead of scanning every file on disk. |
| This matters for large datasets in distributed training. |
| |
| Note: the main process is meant to read the jsonl file and build the dataset first; |
| the other processes continue once the main process has finished. |
| |
| Args: |
| metadata_path: Path to the metadata.jsonl file. |
| data_dir: Root directory of the dataset. |
| accelerator: Accelerator object, used for multi-process synchronization. |
| |
| Returns: |
| datasets.DatasetDict |
| """ |
| if accelerator is None or accelerator.is_main_process: |
| print(f"[INFO] Loading dataset from metadata.jsonl: {metadata_path}", flush=True) |
| |
| |
| data_list = [] |
| if os.path.exists(metadata_path): |
| with open(metadata_path, 'r', encoding='utf-8') as f: |
| for line_num, line in enumerate(f): |
| try: |
| item = json.loads(line.strip()) |
| file_name = item.get('file_name', '') |
| caption = item.get('caption', '') |
| |
| |
| image_path = os.path.join(data_dir, file_name) |
| |
| |
| |
| |
| |
| data_list.append({ |
| 'image': image_path, |
| 'text': caption |
| }) |
| |
| |
| if (line_num + 1) % 100000 == 0 and (accelerator is None or accelerator.is_main_process): |
| print(f"[INFO] Processed {line_num + 1} entries from metadata.jsonl", flush=True) |
| |
| except json.JSONDecodeError as e: |
| if accelerator is None or accelerator.is_main_process: |
| print(f"[WARNING] Skipping invalid JSON at line {line_num + 1}: {e}", flush=True) |
| continue |
| |
| if accelerator is None or accelerator.is_main_process: |
| print(f"[INFO] Loaded {len(data_list)} image-caption pairs from metadata.jsonl", flush=True) |
| else: |
| raise FileNotFoundError(f"metadata.jsonl not found at: {metadata_path}") |
| |
| |
| |
| |
| dataset = datasets.Dataset.from_list(data_list) |
| |
| return datasets.DatasetDict({'train': dataset}) |
|
|
|
|
| def main(args): |
| """Main training function.""" |
| if args.report_to == "wandb" and args.hub_token is not None: |
| raise ValueError( |
| "You cannot use both --report_to=wandb and --hub_token due to security risk." |
| ) |
|
|
| logging_dir = Path(args.output_dir, args.logging_dir) |
|
|
| if torch.backends.mps.is_available() and args.mixed_precision == "bf16": |
| raise ValueError( |
| "Mixed precision training with bfloat16 is not supported on MPS." |
| ) |
|
|
| |
| if torch.cuda.is_available(): |
| num_gpus = torch.cuda.device_count() |
| print(f"Found {num_gpus} GPUs available") |
| if num_gpus > 1: |
| print(f"Multi-GPU training enabled with {num_gpus} GPUs") |
| else: |
| print("No CUDA GPUs found, training on CPU") |
|
|
| accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) |
| |
| kwargs = DistributedDataParallelKwargs( |
| find_unused_parameters=True, |
| gradient_as_bucket_view=True, |
| static_graph=False, |
| ) |
| accelerator = Accelerator( |
| gradient_accumulation_steps=args.gradient_accumulation_steps, |
| mixed_precision=args.mixed_precision, |
| log_with=args.report_to, |
| project_config=accelerator_project_config, |
| kwargs_handlers=[kwargs], |
| ) |
|
|
| |
| logging.basicConfig( |
| format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", |
| datefmt="%m/%d/%Y %H:%M:%S", |
| level=logging.INFO, |
| ) |
| logger.info(accelerator.state, main_process_only=False) |
| if accelerator.is_main_process: |
| print("[INFO] Accelerator initialized", flush=True) |
| |
| |
| if accelerator.is_main_process: |
| logger.info(f"Number of processes: {accelerator.num_processes}") |
| logger.info(f"Distributed type: {accelerator.distributed_type}") |
| logger.info(f"Mixed precision: {accelerator.mixed_precision}") |
| if torch.cuda.is_available(): |
| for i in range(torch.cuda.device_count()): |
| logger.info(f"GPU {i}: {torch.cuda.get_device_name(i)}") |
| logger.info(f"GPU {i} memory: {torch.cuda.get_device_properties(i).total_memory / 1024**3:.1f} GB") |
| |
| if accelerator.is_local_main_process: |
| datasets.utils.logging.set_verbosity_warning() |
| transformers.utils.logging.set_verbosity_warning() |
| diffusers.utils.logging.set_verbosity_info() |
| else: |
| datasets.utils.logging.set_verbosity_error() |
| transformers.utils.logging.set_verbosity_error() |
| diffusers.utils.logging.set_verbosity_error() |
|
|
| |
| if args.seed is not None: |
| set_seed(args.seed) |
| if accelerator.is_main_process: |
| print(f"[INFO] Seed set to {args.seed}", flush=True) |
|
|
| |
| if accelerator.is_main_process: |
| if args.output_dir is not None: |
| os.makedirs(args.output_dir, exist_ok=True) |
|
|
| if args.push_to_hub: |
| repo_id = create_repo( |
| repo_id=args.hub_model_id or Path(args.output_dir).name, |
| exist_ok=True, |
| token=args.hub_token |
| ).repo_id |
|
|
| if accelerator.is_main_process: |
| print("[INFO] Loading tokenizers...", flush=True) |
|
|
| |
| tokenizer_one = CLIPTokenizer.from_pretrained( |
| args.pretrained_model_name_or_path, |
| subfolder="tokenizer", |
| revision=args.revision, |
| ) |
| tokenizer_two = CLIPTokenizer.from_pretrained( |
| args.pretrained_model_name_or_path, |
| subfolder="tokenizer_2", |
| revision=args.revision, |
| ) |
|
|
| if accelerator.is_main_process: |
| print("[INFO] Tokenizers loaded. Loading text encoders, VAE, and transformer...", flush=True) |
| tokenizer_three = T5TokenizerFast.from_pretrained( |
| args.pretrained_model_name_or_path, |
| subfolder="tokenizer_3", |
| revision=args.revision, |
| ) |
|
|
| |
| text_encoder_cls_one = import_model_class_from_model_name_or_path( |
| args.pretrained_model_name_or_path, args.revision |
| ) |
| text_encoder_cls_two = import_model_class_from_model_name_or_path( |
| args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" |
| ) |
| text_encoder_cls_three = import_model_class_from_model_name_or_path( |
| args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_3" |
| ) |
|
|
| |
| noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( |
| args.pretrained_model_name_or_path, subfolder="scheduler" |
| ) |
| noise_scheduler_copy = copy.deepcopy(noise_scheduler) |
| |
| text_encoder_one = text_encoder_cls_one.from_pretrained( |
| args.pretrained_model_name_or_path, |
| subfolder="text_encoder", |
| revision=args.revision, |
| variant=args.variant |
| ) |
| text_encoder_two = text_encoder_cls_two.from_pretrained( |
| args.pretrained_model_name_or_path, |
| subfolder="text_encoder_2", |
| revision=args.revision, |
| variant=args.variant |
| ) |
| text_encoder_three = text_encoder_cls_three.from_pretrained( |
| args.pretrained_model_name_or_path, |
| subfolder="text_encoder_3", |
| revision=args.revision, |
| variant=args.variant |
| ) |
| |
| vae = AutoencoderKL.from_pretrained( |
| args.pretrained_model_name_or_path, |
| subfolder="vae", |
| revision=args.revision, |
| variant=args.variant, |
| ) |
| |
| transformer = SD3Transformer2DModel.from_pretrained( |
| args.pretrained_model_name_or_path, |
| subfolder="transformer", |
| revision=args.revision, |
| variant=args.variant |
| ) |
|
|
| if accelerator.is_main_process: |
| print("[INFO] Text encoders, VAE, and transformer loaded", flush=True) |
|
|
| |
| transformer.requires_grad_(False) |
| vae.requires_grad_(False) |
| text_encoder_one.requires_grad_(False) |
| text_encoder_two.requires_grad_(False) |
| text_encoder_three.requires_grad_(False) |
|
|
| |
| weight_dtype = torch.float32 |
| if accelerator.mixed_precision == "fp16": |
| weight_dtype = torch.float16 |
| elif accelerator.mixed_precision == "bf16": |
| weight_dtype = torch.bfloat16 |
|
|
| |
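| # Keep the VAE in float32 for numerical stability; the other frozen models are moved in the (possibly lower-precision) weight dtype. |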
| vae.to(accelerator.device, dtype=torch.float32) |
| transformer.to(accelerator.device, dtype=weight_dtype) |
| text_encoder_one.to(accelerator.device, dtype=weight_dtype) |
| text_encoder_two.to(accelerator.device, dtype=weight_dtype) |
| text_encoder_three.to(accelerator.device, dtype=weight_dtype) |
|
|
| |
| if args.gradient_checkpointing: |
| transformer.enable_gradient_checkpointing() |
| if args.train_text_encoder: |
| text_encoder_one.gradient_checkpointing_enable() |
| text_encoder_two.gradient_checkpointing_enable() |
|
|
| |
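| # Attach LoRA adapters only to the attention projection layers of the transformer; with |
| # lora_alpha equal to the rank, the adapter scaling (lora_alpha / r) stays at 1.0. |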
| transformer_lora_config = LoraConfig( |
| r=args.rank, |
| lora_alpha=args.rank, |
| init_lora_weights="gaussian", |
| target_modules=["attn.to_k", "attn.to_q", "attn.to_v", "attn.to_out.0"], |
| ) |
| transformer.add_adapter(transformer_lora_config) |
|
|
| |
| if args.train_text_encoder: |
| text_lora_config = LoraConfig( |
| r=args.rank, |
| lora_alpha=args.rank, |
| init_lora_weights="gaussian", |
| target_modules=["q_proj", "k_proj", "v_proj", "out_proj"], |
| ) |
| text_encoder_one.add_adapter(text_lora_config) |
| text_encoder_two.add_adapter(text_lora_config) |
| |
|
|
| def unwrap_model(model): |
| model = accelerator.unwrap_model(model) |
| model = model._orig_mod if is_compiled_module(model) else model |
| return model |
|
|
| |
| if args.allow_tf32 and torch.cuda.is_available(): |
| torch.backends.cuda.matmul.allow_tf32 = True |
|
|
| |
| if args.scale_lr: |
| args.learning_rate = ( |
| args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes |
| ) |
|
|
| |
| if args.mixed_precision == "fp16": |
| models = [transformer] |
| if args.train_text_encoder: |
| models.extend([text_encoder_one, text_encoder_two]) |
| cast_training_params(models, dtype=torch.float32) |
|
|
| |
| transformer_lora_parameters = list(filter(lambda p: p.requires_grad, transformer.parameters())) |
| if args.train_text_encoder: |
| text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters())) |
| text_lora_parameters_two = list(filter(lambda p: p.requires_grad, text_encoder_two.parameters())) |
| params_to_optimize = ( |
| transformer_lora_parameters |
| + text_lora_parameters_one |
| + text_lora_parameters_two |
| ) |
| else: |
| params_to_optimize = transformer_lora_parameters |
|
|
| |
| if args.use_8bit_adam: |
| try: |
| import bitsandbytes as bnb |
| except ImportError: |
| raise ImportError("To use 8-bit Adam, install bitsandbytes: pip install bitsandbytes") |
| optimizer_class = bnb.optim.AdamW8bit |
| else: |
| optimizer_class = torch.optim.AdamW |
|
|
| optimizer = optimizer_class( |
| params_to_optimize, |
| lr=args.learning_rate, |
| betas=(args.adam_beta1, args.adam_beta2), |
| weight_decay=args.adam_weight_decay, |
| eps=args.adam_epsilon, |
| ) |
|
|
| if accelerator.is_main_process: |
| print("[INFO] Optimizer created. Loading dataset...", flush=True) |
|
|
| |
| |
| with accelerator.main_process_first(): |
| metadata_path = None |
| if args.train_data_dir is not None: |
| |
| potential_metadata = os.path.join(args.train_data_dir, "metadata.jsonl") |
| if os.path.exists(potential_metadata): |
| metadata_path = potential_metadata |
| |
| if metadata_path is not None: |
| |
| if accelerator.is_main_process: |
| print(f"[INFO] Found metadata.jsonl, using efficient loading method", flush=True) |
| dataset = load_dataset_from_jsonl(metadata_path, args.train_data_dir, accelerator) |
| elif args.dataset_name is not None: |
| dataset = load_dataset( |
| args.dataset_name, |
| args.dataset_config_name, |
| cache_dir=args.cache_dir, |
| data_dir=args.train_data_dir |
| ) |
| else: |
| |
| if accelerator.is_main_process: |
| print("[WARNING] No metadata.jsonl found, using imagefolder (may be slow for large datasets)", flush=True) |
| data_files = {} |
| if args.train_data_dir is not None: |
| data_files["train"] = os.path.join(args.train_data_dir, "**") |
| dataset = load_dataset( |
| "imagefolder", |
| data_files=data_files, |
| cache_dir=args.cache_dir, |
| ) |
| if accelerator.is_main_process: |
| print("[INFO] Dataset loaded successfully.", flush=True) |
| |
| |
| accelerator.wait_for_everyone() |
| |
| if accelerator.is_main_process: |
| print("[INFO] All processes synchronized. Building transforms and DataLoader...", flush=True) |
|
|
| |
| column_names = dataset["train"].column_names |
| dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) |
| |
| if accelerator.is_main_process: |
| print(f"[INFO] Dataset columns: {column_names}", flush=True) |
| |
| |
| if args.image_column is not None and args.image_column in column_names: |
| |
| image_column = args.image_column |
| else: |
| |
| if 'image' in column_names: |
| image_column = 'image' |
| else: |
| image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] |
| |
| |
| if args.image_column is not None and args.image_column != image_column: |
| if accelerator.is_main_process: |
| print(f"[WARNING] Specified image_column '{args.image_column}' not found. Using '{image_column}' instead.", flush=True) |
| |
| if accelerator.is_main_process: |
| print(f"[INFO] Using image column: {image_column}", flush=True) |
| |
| |
| if args.caption_column is not None and args.caption_column in column_names: |
| |
| caption_column = args.caption_column |
| else: |
| |
| if 'text' in column_names: |
| caption_column = 'text' |
| elif 'caption' in column_names: |
| caption_column = 'caption' |
| else: |
| caption_column = dataset_columns[1] if dataset_columns is not None else (column_names[1] if len(column_names) > 1 else column_names[0]) |
| |
| |
| if args.caption_column is not None and args.caption_column != caption_column: |
| if accelerator.is_main_process: |
| print(f"[WARNING] Specified caption_column '{args.caption_column}' not found. Using '{caption_column}' instead.", flush=True) |
| |
| if accelerator.is_main_process: |
| print(f"[INFO] Using caption column: {caption_column}", flush=True) |
|
|
| def tokenize_captions(examples, is_train=True): |
| captions = [] |
| for caption in examples[caption_column]: |
| if isinstance(caption, str): |
| captions.append(caption) |
| elif isinstance(caption, (list, np.ndarray)): |
| captions.append(random.choice(caption) if is_train else caption[0]) |
| else: |
| raise ValueError(f"Caption column should contain strings or lists of strings.") |
| |
| tokens_one = tokenize_prompt(tokenizer_one, captions) |
| tokens_two = tokenize_prompt(tokenizer_two, captions) |
| tokens_three = tokenize_prompt(tokenizer_three, captions) |
| return tokens_one, tokens_two, tokens_three |
|
|
| |
| train_resize = transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR) |
| train_crop = transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution) |
| train_flip = transforms.RandomHorizontalFlip(p=1.0) |
| train_transforms = transforms.Compose([ |
| transforms.ToTensor(), |
| transforms.Normalize([0.5], [0.5]), |
| ]) |
|
|
| def preprocess_train(examples): |
| |
| images = [] |
| for img in examples[image_column]: |
| if isinstance(img, str): |
| |
| try: |
| img = Image.open(img).convert("RGB") |
| except Exception as e: |
| |
| if accelerator.is_main_process: |
| print(f"[WARNING] Failed to load image {img}: {e}", flush=True) |
| img = Image.new('RGB', (args.resolution, args.resolution), color='black') |
| elif hasattr(img, 'convert'): |
| |
| img = img.convert("RGB") |
| else: |
| raise ValueError(f"Unexpected image type: {type(img)}") |
| images.append(img) |
| original_sizes = [] |
| all_images = [] |
| crop_top_lefts = [] |
| |
| for image in images: |
| original_sizes.append((image.height, image.width)) |
| image = train_resize(image) |
| if args.random_flip and random.random() < 0.5: |
| image = train_flip(image) |
| if args.center_crop: |
| y1 = max(0, int(round((image.height - args.resolution) / 2.0))) |
| x1 = max(0, int(round((image.width - args.resolution) / 2.0))) |
| image = train_crop(image) |
| else: |
| y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution)) |
| image = crop(image, y1, x1, h, w) |
| crop_top_left = (y1, x1) |
| crop_top_lefts.append(crop_top_left) |
| image = train_transforms(image) |
| all_images.append(image) |
|
|
| examples["original_sizes"] = original_sizes |
| examples["crop_top_lefts"] = crop_top_lefts |
| examples["pixel_values"] = all_images |
| |
| tokens_one, tokens_two, tokens_three = tokenize_captions(examples) |
| examples["input_ids_one"] = tokens_one |
| examples["input_ids_two"] = tokens_two |
| examples["input_ids_three"] = tokens_three |
| return examples |
|
|
| with accelerator.main_process_first(): |
| if args.max_train_samples is not None: |
| dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) |
| train_dataset = dataset["train"].with_transform(preprocess_train, output_all_columns=True) |
|
|
| def collate_fn(examples): |
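| """Stack per-example tensors into batched tensors; sizes and crop offsets stay as Python lists.""" |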
| pixel_values = torch.stack([example["pixel_values"] for example in examples]) |
| pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() |
| original_sizes = [example["original_sizes"] for example in examples] |
| crop_top_lefts = [example["crop_top_lefts"] for example in examples] |
| input_ids_one = torch.stack([example["input_ids_one"] for example in examples]) |
| input_ids_two = torch.stack([example["input_ids_two"] for example in examples]) |
| input_ids_three = torch.stack([example["input_ids_three"] for example in examples]) |
| |
| return { |
| "pixel_values": pixel_values, |
| "input_ids_one": input_ids_one, |
| "input_ids_two": input_ids_two, |
| "input_ids_three": input_ids_three, |
| "original_sizes": original_sizes, |
| "crop_top_lefts": crop_top_lefts, |
| } |
|
|
| |
| if args.dataloader_num_workers == 0 and accelerator.num_processes > 1: |
| |
| args.dataloader_num_workers = min(4, os.cpu_count() // accelerator.num_processes) |
| logger.info(f"Auto-setting dataloader_num_workers to {args.dataloader_num_workers} for multi-GPU training") |
|
|
| train_dataloader = torch.utils.data.DataLoader( |
| train_dataset, |
| shuffle=True, |
| collate_fn=collate_fn, |
| batch_size=args.train_batch_size, |
| num_workers=args.dataloader_num_workers, |
| pin_memory=True, |
| persistent_workers=args.dataloader_num_workers > 0, |
| ) |
|
|
| if accelerator.is_main_process: |
| print("[INFO] DataLoader ready. Computing training steps and scheduler...", flush=True) |
|
|
| |
| overrode_max_train_steps = False |
| num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) |
| if args.max_train_steps is None: |
| args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch |
| overrode_max_train_steps = True |
|
|
| lr_scheduler = get_scheduler( |
| args.lr_scheduler, |
| optimizer=optimizer, |
| num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, |
| num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, |
| ) |
|
|
| |
| if args.train_text_encoder: |
| transformer, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( |
| transformer, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler |
| ) |
| else: |
| transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( |
| transformer, optimizer, train_dataloader, lr_scheduler |
| ) |
|
|
| |
| num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) |
| if overrode_max_train_steps: |
| args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch |
| args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) |
|
|
| |
| if accelerator.is_main_process: |
| try: |
| accelerator.init_trackers("text2image-fine-tune", config=vars(args)) |
| except Exception as e: |
| logger.warning(f"Failed to initialize trackers: {e}") |
| logger.warning("Continuing without tracking. You can monitor training through console logs.") |
| |
| args.report_to = None |
|
|
| |
| total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps |
| logger.info("***** Running training *****") |
| logger.info(f" Num examples = {len(train_dataset)}") |
| logger.info(f" Num Epochs = {args.num_train_epochs}") |
| logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") |
| logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") |
| logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") |
| logger.info(f" Total optimization steps = {args.max_train_steps}") |
| logger.info(f" Number of GPU processes = {accelerator.num_processes}") |
| if accelerator.num_processes > 1: |
| logger.info(f" Effective batch size per GPU = {args.train_batch_size * args.gradient_accumulation_steps}") |
| logger.info(f" Total effective batch size across all GPUs = {total_batch_size}") |
| |
| global_step = 0 |
| first_epoch = 0 |
| if accelerator.is_main_process: |
| print( |
| f"[INFO] Training setup complete. num_examples={len(train_dataset)}, " |
| f"max_train_steps={args.max_train_steps}, num_epochs={args.num_train_epochs}", |
| flush=True, |
| ) |
|
|
| |
| if args.resume_from_checkpoint: |
| if args.resume_from_checkpoint != "latest": |
| path = os.path.basename(args.resume_from_checkpoint) |
| else: |
| dirs = os.listdir(args.output_dir) |
| dirs = [d for d in dirs if d.startswith("checkpoint")] |
| dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) |
| path = dirs[-1] if len(dirs) > 0 else None |
|
|
| if path is None: |
| accelerator.print(f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting new training.") |
| args.resume_from_checkpoint = None |
| initial_global_step = 0 |
| else: |
| accelerator.print(f"Resuming from checkpoint {path}") |
| accelerator.load_state(os.path.join(args.output_dir, path)) |
| global_step = int(path.split("-")[1]) |
| initial_global_step = global_step |
| first_epoch = global_step // num_update_steps_per_epoch |
| else: |
| initial_global_step = 0 |
|
|
| progress_bar = tqdm( |
| range(0, args.max_train_steps), |
| initial=initial_global_step, |
| desc="Steps", |
| disable=not accelerator.is_local_main_process, |
| ) |
|
|
| def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): |
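| """Look up the sigma for each sampled timestep and reshape it to broadcast over the latent dims.""" |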
| sigmas = noise_scheduler_copy.sigmas.to(device=accelerator.device, dtype=dtype) |
| schedule_timesteps = noise_scheduler_copy.timesteps.to(accelerator.device) |
| timesteps = timesteps.to(accelerator.device) |
| step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] |
| sigma = sigmas[step_indices].flatten() |
| while len(sigma.shape) < n_dim: |
| sigma = sigma.unsqueeze(-1) |
| return sigma |
|
|
| |
| for epoch in range(first_epoch, args.num_train_epochs): |
| transformer.train() |
| if args.train_text_encoder: |
| text_encoder_one.train() |
| text_encoder_two.train() |
|
|
| if accelerator.is_main_process: |
| print( |
| f"[INFO] Starting epoch {epoch + 1}/{args.num_train_epochs}, current global_step={global_step}", |
| flush=True, |
| ) |
| |
| train_loss = 0.0 |
| for step, batch in enumerate(train_dataloader): |
| with accelerator.accumulate(transformer): |
| |
| pixel_values = batch["pixel_values"].to(dtype=vae.dtype) |
| model_input = vae.encode(pixel_values).latent_dist.sample() |
| |
| |
| vae_config_shift_factor = vae.config.shift_factor |
| vae_config_scaling_factor = vae.config.scaling_factor |
| model_input = (model_input - vae_config_shift_factor) * vae_config_scaling_factor |
| model_input = model_input.to(dtype=weight_dtype) |
|
|
| |
| prompt_embeds, pooled_prompt_embeds = encode_prompt( |
| text_encoders=[text_encoder_one, text_encoder_two, text_encoder_three], |
| tokenizers=[tokenizer_one, tokenizer_two, tokenizer_three], |
| prompt=None, |
| max_sequence_length=args.max_sequence_length, |
| text_input_ids_list=[batch["input_ids_one"], batch["input_ids_two"], batch["input_ids_three"]], |
| ) |
|
|
| |
| noise = torch.randn_like(model_input) |
| bsz = model_input.shape[0] |
| |
| |
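| # Sample one timestep per image from the configured density (e.g. logit-normal), as used in SD3 flow-matching training. |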
| u = compute_density_for_timestep_sampling( |
| weighting_scheme=args.weighting_scheme, |
| batch_size=bsz, |
| logit_mean=args.logit_mean, |
| logit_std=args.logit_std, |
| mode_scale=args.mode_scale, |
| ) |
| indices = (u * noise_scheduler_copy.config.num_train_timesteps).long() |
| timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device) |
|
|
| |
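| # Forward noising for flow matching: interpolate linearly between the clean latents and noise, |
| # x_t = (1 - sigma) * x_0 + sigma * noise. |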
| sigmas = get_sigmas(timesteps, n_dim=model_input.ndim, dtype=model_input.dtype) |
| noisy_model_input = (1.0 - sigmas) * model_input + sigmas * noise |
|
|
| |
| model_pred = transformer( |
| hidden_states=noisy_model_input, |
| timestep=timesteps, |
| encoder_hidden_states=prompt_embeds, |
| pooled_projections=pooled_prompt_embeds, |
| return_dict=False, |
| )[0] |
|
|
| |
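| # With preconditioning, the raw prediction (a velocity) is folded back into an estimate of the clean |
| # latents (x_t - sigma * v), so the target is the clean latents; otherwise the target is the velocity noise - x_0. |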
| if args.precondition_outputs: |
| model_pred = model_pred * (-sigmas) + noisy_model_input |
| target = model_input |
| else: |
| target = noise - model_input |
|
|
| |
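| # Weight the squared error with the SD3 loss weighting for the sampled sigmas, then reduce per sample and over the batch. |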
| weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas) |
| loss = torch.mean( |
| (weighting.float() * (model_pred.float() - target.float()) ** 2).reshape(target.shape[0], -1), |
| 1, |
| ) |
| loss = loss.mean() |
|
|
| |
| avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() |
| train_loss += avg_loss.item() / args.gradient_accumulation_steps |
|
|
| |
| accelerator.backward(loss) |
| if accelerator.sync_gradients: |
| accelerator.clip_grad_norm_(params_to_optimize, args.max_grad_norm) |
| optimizer.step() |
| lr_scheduler.step() |
| optimizer.zero_grad() |
|
|
| |
| if accelerator.sync_gradients: |
| progress_bar.update(1) |
| global_step += 1 |
| if hasattr(accelerator, 'trackers') and accelerator.trackers: |
| accelerator.log({"train_loss": train_loss}, step=global_step) |
| train_loss = 0.0 |
|
|
| if accelerator.is_main_process and global_step % 1000 == 0: |
| print( |
| f"[INFO] Optimization step completed at global_step={global_step}, " |
| f"recent step_loss={loss.detach().item():.4f}", |
| flush=True, |
| ) |
|
|
| |
| if accelerator.distributed_type == DistributedType.DEEPSPEED or accelerator.is_main_process: |
| if global_step % args.checkpointing_steps == 0: |
| if args.checkpoints_total_limit is not None: |
| checkpoints = os.listdir(args.output_dir) |
| checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] |
| checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) |
|
|
| if len(checkpoints) >= args.checkpoints_total_limit: |
| num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 |
| removing_checkpoints = checkpoints[0:num_to_remove] |
| logger.info(f"Removing {len(removing_checkpoints)} checkpoints") |
| for removing_checkpoint in removing_checkpoints: |
| removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) |
| shutil.rmtree(removing_checkpoint) |
|
|
| save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") |
| accelerator.save_state(save_path) |
| logger.info(f"Saved state to {save_path}") |
| |
| |
| try: |
| |
| unwrapped_transformer = unwrap_model(transformer) |
| transformer_lora_layers = get_peft_model_state_dict(unwrapped_transformer) |
| |
| text_encoder_lora_layers = None |
| text_encoder_2_lora_layers = None |
| if args.train_text_encoder: |
| unwrapped_text_encoder_one = unwrap_model(text_encoder_one) |
| unwrapped_text_encoder_two = unwrap_model(text_encoder_two) |
| text_encoder_lora_layers = get_peft_model_state_dict(unwrapped_text_encoder_one) |
| text_encoder_2_lora_layers = get_peft_model_state_dict(unwrapped_text_encoder_two) |
| |
| |
| StableDiffusion3Pipeline.save_lora_weights( |
| save_directory=save_path, |
| transformer_lora_layers=transformer_lora_layers, |
| text_encoder_lora_layers=text_encoder_lora_layers, |
| text_encoder_2_lora_layers=text_encoder_2_lora_layers, |
| ) |
| logger.info(f"Saved LoRA weights in standard format to {save_path}") |
| except Exception as e: |
| logger.warning(f"Failed to save LoRA weights in standard format: {e}") |
| logger.warning("Checkpoint saved with accelerator format only. You can extract LoRA weights later.") |
|
|
| logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} |
| progress_bar.set_postfix(**logs) |
|
|
| if global_step >= args.max_train_steps: |
| break |
|
|
| |
| if accelerator.is_main_process: |
| if args.validation_prompt is not None and epoch % args.validation_epochs == 0: |
| print(f"[INFO] Running validation for epoch {epoch + 1}, global_step={global_step}", flush=True) |
| pipeline = StableDiffusion3Pipeline.from_pretrained( |
| args.pretrained_model_name_or_path, |
| vae=vae, |
| text_encoder=unwrap_model(text_encoder_one), |
| text_encoder_2=unwrap_model(text_encoder_two), |
| text_encoder_3=unwrap_model(text_encoder_three), |
| transformer=unwrap_model(transformer), |
| revision=args.revision, |
| variant=args.variant, |
| torch_dtype=weight_dtype, |
| ) |
| images = log_validation(pipeline, args, accelerator, epoch, global_step=global_step) |
| del pipeline |
| torch.cuda.empty_cache() |
|
|
| |
| accelerator.wait_for_everyone() |
| if accelerator.is_main_process: |
| transformer = unwrap_model(transformer) |
| transformer_lora_layers = get_peft_model_state_dict(transformer) |
|
|
| if args.train_text_encoder: |
| text_encoder_one = unwrap_model(text_encoder_one) |
| text_encoder_two = unwrap_model(text_encoder_two) |
| text_encoder_lora_layers = get_peft_model_state_dict(text_encoder_one) |
| text_encoder_2_lora_layers = get_peft_model_state_dict(text_encoder_two) |
| else: |
| text_encoder_lora_layers = None |
| text_encoder_2_lora_layers = None |
|
|
| StableDiffusion3Pipeline.save_lora_weights( |
| save_directory=args.output_dir, |
| transformer_lora_layers=transformer_lora_layers, |
| text_encoder_lora_layers=text_encoder_lora_layers, |
| text_encoder_2_lora_layers=text_encoder_2_lora_layers, |
| ) |
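| # The LoRA weights saved above can be re-applied with StableDiffusion3Pipeline.load_lora_weights, |
| # as done below for the final validation pipeline. |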
|
|
| |
| if args.mixed_precision == "fp16": |
| vae.to(weight_dtype) |
| |
| pipeline = StableDiffusion3Pipeline.from_pretrained( |
| args.pretrained_model_name_or_path, |
| vae=vae, |
| revision=args.revision, |
| variant=args.variant, |
| torch_dtype=weight_dtype, |
| ) |
| pipeline.load_lora_weights(args.output_dir) |
|
|
| images = [] |
| if args.validation_prompt and args.num_validation_images > 0: |
| images = log_validation(pipeline, args, accelerator, epoch, is_final_validation=True, global_step=global_step) |
|
|
| if args.push_to_hub: |
| save_model_card( |
| repo_id, |
| images=images, |
| base_model=args.pretrained_model_name_or_path, |
| dataset_name=args.dataset_name, |
| train_text_encoder=args.train_text_encoder, |
| repo_folder=args.output_dir, |
| ) |
| upload_folder( |
| repo_id=repo_id, |
| folder_path=args.output_dir, |
| commit_message="End of training", |
| ignore_patterns=["step_*", "epoch_*"], |
| ) |
|
|
| accelerator.end_training() |
|
|
|
|
| if __name__ == "__main__": |
| args = parse_args() |
| main(args) |
|
|