# NOTE(review): the original paste began with "Spaces:" / "Runtime error" —
# Hugging Face Space status-banner text captured during copy/paste, not Python.
# Commented out so the module parses.
"""
app.py — OOTDiffusion Hugging Face Space

Place this file in the ROOT of your Space repo.
Your Space structure should look like:

OOTDiffusion/
├── app.py              <- this file (root level)
├── requirements.txt    <- root level
├── README.md           <- root level
└── OOTDiffusion-main/  <- the uploaded zip contents
    ├── ootd/
    ├── run/
    ├── preprocess/
    ├── checkpoints/
    └── ...
"""
import sys
import os

# -- Path setup ---------------------------------------------------------------
# Make the OOTDiffusion sources importable, supporting both a flat layout and
# the nested OOTDiffusion-main/ layout produced by unzipping the upstream repo.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))

OOTD_DIR = ROOT_DIR
for _name in ("OOTDiffusion-main", "OOTDiffusion"):
    _nested = os.path.join(ROOT_DIR, _name)
    if os.path.isdir(_nested):
        OOTD_DIR = _nested
        break

RUN_DIR = os.path.join(OOTD_DIR, "run")

# RUN_DIR is inserted last so it ends up FIRST on sys.path (same order as the
# original two insert(0, ...) calls).
sys.path.insert(0, OOTD_DIR)
sys.path.insert(0, RUN_DIR)

print(f"[OOTDiffusion] ROOT_DIR : {ROOT_DIR}")
print(f"[OOTDiffusion] OOTD_DIR : {OOTD_DIR}")
| import torch | |
| import numpy as np | |
| import gradio as gr | |
| from PIL import Image | |
# -- Device -------------------------------------------------------------------
# Prefer the GPU when torch can see one; otherwise fall back to CPU.
if torch.cuda.is_available():
    DEVICE = "cuda"
else:
    DEVICE = "cpu"
print(f"[OOTDiffusion] Device: {DEVICE}")
# -- Lazy-load models ---------------------------------------------------------
# Module-level caches so each pipeline is constructed at most once per process.
# Populated on first use by load_pipeline(); None means "not loaded yet".
_pipe_hd = None
_pipe_dc = None
def load_pipeline(model_type: str):
    """Return the cached try-on pipeline for *model_type*.

    "hd" yields the half-body (VITON-HD) pipeline; any other value falls
    through to the full-body Dress Code pipeline. The heavy pipeline classes
    are imported lazily so the app starts even before first inference.
    """
    global _pipe_hd, _pipe_dc

    if model_type == "hd":
        if _pipe_hd is None:
            from ootd.inference_ootd_hd import OOTDiffusionHD
            print("[OOTDiffusion] Loading HD pipeline …")
            _pipe_hd = OOTDiffusionHD(OOTD_DIR)
        return _pipe_hd

    if _pipe_dc is None:
        from ootd.inference_ootd_dc import OOTDiffusionDC
        print("[OOTDiffusion] Loading DC pipeline …")
        _pipe_dc = OOTDiffusionDC(OOTD_DIR)
    return _pipe_dc
# -- Category mapping ---------------------------------------------------------
# UI dropdown label -> integer category code forwarded to the pipeline.
# NOTE(review): presumably 0/1/2 match OOTDiffusion's Dress Code categories
# (upper / lower / dress) — confirm against the ootd inference code.
CATEGORY_MAP = {
    "Upper-body": 0,
    "Lower-body": 1,
    "Dress": 2,
}
| # ββ Inference βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ | |
| def run_tryon(model_image, cloth_image, model_type, category_label, | |
| n_samples, n_steps, guidance_scale, seed): | |
| if model_image is None: | |
| raise gr.Error("Please upload a model (person) image.") | |
| if cloth_image is None: | |
| raise gr.Error("Please upload a garment image.") | |
| if isinstance(model_image, np.ndarray): | |
| model_image = Image.fromarray(model_image) | |
| if isinstance(cloth_image, np.ndarray): | |
| cloth_image = Image.fromarray(cloth_image) | |
| model_image = model_image.convert("RGB") | |
| cloth_image = cloth_image.convert("RGB") | |
| category_idx = CATEGORY_MAP[category_label] | |
| try: | |
| pipe = load_pipeline(model_type) | |
| except Exception as e: | |
| raise gr.Error( | |
| f"Failed to load model: {e}\n" | |
| "Make sure OOTDiffusion-main/ folder with ootd/ and checkpoints/ is present." | |
| ) | |
| try: | |
| result = pipe( | |
| model_type=model_type, | |
| category=category_idx, | |
| image_garm=cloth_image, | |
| image_vton=model_image, | |
| mask=None, | |
| image_ori=model_image, | |
| num_samples=int(n_samples), | |
| num_steps=int(n_steps), | |
| guidance_scale=float(guidance_scale), | |
| seed=int(seed), | |
| ) | |
| except Exception as e: | |
| raise gr.Error(f"Inference failed: {e}") | |
| if isinstance(result, (list, tuple)): | |
| return result | |
| return [result] | |
# -- Gradio UI ----------------------------------------------------------------
# NOTE(review): the markdown/label strings below contained mojibake
# (mis-encoded emoji, dashes, and warning signs); rewritten as clean text.
with gr.Blocks(title="OOTDiffusion Virtual Try-On", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # OOTDiffusion — Virtual Try-On
    **[AAAI 2025]** Upload a *person photo* and a *garment image*, then click **Run Try-On**.
    > Non-commercial use only (CC-BY-NC-SA-4.0)
    """)

    with gr.Row():
        # Left column: inputs.
        with gr.Column(scale=1):
            model_img = gr.Image(label="Model Image (person)", type="pil", height=380)
            cloth_img = gr.Image(label="Garment Image", type="pil", height=380)

        # Middle column: sampling controls.
        with gr.Column(scale=1):
            model_type = gr.Radio(
                choices=["hd", "dc"], value="hd",
                label="Model Type",
                info="hd = half-body (VITON-HD) | dc = full-body (Dress Code)",
            )
            category = gr.Dropdown(
                choices=list(CATEGORY_MAP.keys()), value="Upper-body",
                label="Garment Category",
                info="Only matters when Model Type = dc",
            )
            n_samples = gr.Slider(1, 4, step=1, value=1, label="Number of Samples")
            n_steps = gr.Slider(10, 40, step=5, value=20, label="Denoising Steps",
                                info="More steps = better quality, slower")
            guidance = gr.Slider(1.0, 5.0, step=0.5, value=2.0, label="Guidance Scale")
            seed = gr.Number(value=42, label="Seed (-1 = random)", precision=0)
            run_btn = gr.Button("Run Try-On", variant="primary", size="lg")

        # Right column: results and usage tips.
        with gr.Column(scale=1):
            output_gallery = gr.Gallery(
                label="Try-On Results",
                columns=2, height=500, object_fit="contain",
            )
            gr.Markdown("""
            ### Tips
            - **HD model** — best for upper-body garments on half-body photos
            - **DC model** — supports upper / lower / dress on full-body photos
            - Steps **30–40** give noticeably better quality
            - **Seed = -1** gives a different result each run
            """)

    # Wire the button to the inference function.
    run_btn.click(
        fn=run_tryon,
        inputs=[model_img, cloth_img, model_type, category,
                n_samples, n_steps, guidance, seed],
        outputs=output_gallery,
    )
# -- Launch -------------------------------------------------------------------
if __name__ == "__main__":
    # Start the Gradio server when executed as a script (the Spaces entry point).
    demo.launch()