fanboyd13 commited on
Commit
2b08552
·
verified ·
1 Parent(s): d7aef1a

Upload app.py

Browse files

---
sdk: gradio
sdk_version: 4.16.0
app_file: OOTDiffusion-main/app.py
---
```
4. Commit — HF will now look inside the subfolder

---

### ⚠️ Bigger Issue I Also Notice

Your whole OOTDiffusion repo is **inside a subfolder** (`OOTDiffusion-main/`). This means the `app.py` path imports like `from ootd.inference_ootd_hd import ...` may also break.

**Best fix long-term:** Move all contents of `OOTDiffusion-main/` up to the root so your Space looks like:
```
OOTDiffusion/
├── app.py            ← root level
├── ootd/
├── run/
├── preprocess/
├── checkpoints/
├── requirements.txt
└── README.md
```

Files changed (1) hide show
  1. app.py +238 -0
app.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
app.py — OOTDiffusion Hugging Face Space

Place this file in the ROOT of your Space repo alongside the
OOTDiffusion source folders: ootd/, run/, preprocess/, checkpoints/

README.md front-matter required:
---
title: OOTDiffusion Virtual Try-On
emoji: 👗
colorFrom: purple
colorTo: pink
sdk: gradio
sdk_version: 4.16.0
app_file: app.py
pinned: false
license: cc-by-nc-sa-4.0
---
"""

import sys
import os

# ── Path setup ────────────────────────────────────────────────────────────────
# Make the repo root and the run/ helper directory importable no matter what
# working directory the Space process is launched from.  Must happen BEFORE
# the ootd.* imports performed lazily in load_pipeline().
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
RUN_DIR = os.path.join(ROOT_DIR, "run")
sys.path.insert(0, ROOT_DIR)
sys.path.insert(0, RUN_DIR)

import torch
import numpy as np
import gradio as gr
from PIL import Image

# ── Device ────────────────────────────────────────────────────────────────────
# NOTE(review): DEVICE is computed but not passed to the pipelines below —
# presumably the OOTDiffusion classes pick the device themselves; verify.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
print(f"[OOTDiffusion] Device: {DEVICE}")

# ── Lazy-load models (loaded once on first request) ───────────────────────────
# Caches populated by load_pipeline(); keeping them None at import time lets
# the Space start quickly and only load the weights that are actually used.
_pipe_hd = None  # VITON-HD — half-body
_pipe_dc = None  # Dress Code — full-body
def load_pipeline(model_type: str):
    """Import and cache the requested OOTDiffusion pipeline.

    The heavyweight ootd.* imports happen here (not at module import time)
    so the Space boots fast and only loads the weights it actually serves.

    Args:
        model_type: ``"hd"`` (VITON-HD, half-body) or ``"dc"``
            (Dress Code, full-body).

    Returns:
        The cached pipeline instance for *model_type*, constructing it on
        first use.

    Raises:
        ValueError: if *model_type* is neither ``"hd"`` nor ``"dc"``.
            (The original ``else`` branch silently treated any unknown
            value as "dc"; failing loudly is safer.)
    """
    global _pipe_hd, _pipe_dc

    if model_type == "hd":
        if _pipe_hd is None:
            from ootd.inference_ootd_hd import OOTDiffusionHD
            print("[OOTDiffusion] Loading HD pipeline …")
            _pipe_hd = OOTDiffusionHD(ROOT_DIR)
        return _pipe_hd

    if model_type == "dc":
        if _pipe_dc is None:
            from ootd.inference_ootd_dc import OOTDiffusionDC
            print("[OOTDiffusion] Loading DC pipeline …")
            _pipe_dc = OOTDiffusionDC(ROOT_DIR)
        return _pipe_dc

    raise ValueError(f"Unknown model_type {model_type!r}; expected 'hd' or 'dc'.")
# ── Category mapping ──────────────────────────────────────────────────────────
# UI dropdown label → integer category index passed to the pipeline.
# Per the dropdown's info text below, the category is only consulted by the
# "dc" (full-body) model.
CATEGORY_MAP = {
    "Upper-body": 0,
    "Lower-body": 1,
    "Dress": 2,
}
# ── Main inference function ───────────────────────────────────────────────────
def run_tryon(
    model_image,
    cloth_image,
    model_type,
    category_label,
    n_samples,
    n_steps,
    guidance_scale,
    seed,
):
    """Run OOTDiffusion try-on and return a list of result images.

    Args:
        model_image: PIL image (or numpy array) of the person.
        cloth_image: PIL image (or numpy array) of the garment.
        model_type: "hd" or "dc" — selects which pipeline to use.
        category_label: key of CATEGORY_MAP (e.g. "Upper-body").
        n_samples / n_steps / seed: numeric UI values; coerced to int.
        guidance_scale: classifier-free guidance strength; coerced to float.

    Returns:
        A list of result images suitable for a gr.Gallery.

    Raises:
        gr.Error: on missing inputs, unknown category, model-load failure,
            or inference failure (surfaced in the UI).
    """
    if model_image is None:
        raise gr.Error("Please upload a model (person) image.")
    if cloth_image is None:
        raise gr.Error("Please upload a garment image.")

    # Convert to PIL just in case Gradio passes numpy arrays
    if isinstance(model_image, np.ndarray):
        model_image = Image.fromarray(model_image)
    if isinstance(cloth_image, np.ndarray):
        cloth_image = Image.fromarray(cloth_image)

    model_image = model_image.convert("RGB")
    cloth_image = cloth_image.convert("RGB")

    # Surface a friendly UI error instead of a raw KeyError on a bad label.
    try:
        category_idx = CATEGORY_MAP[category_label]
    except KeyError:
        raise gr.Error(f"Unknown garment category: {category_label!r}")

    try:
        pipe = load_pipeline(model_type)
    except Exception as e:
        raise gr.Error(
            f"Failed to load model: {e}\n"
            "Make sure checkpoints/ and ootd/ folders are present."
        )

    try:
        # The original "hd" and "dc" branches were identical except for the
        # model_type= literal, which always equals the `model_type` argument —
        # a single call covers both pipelines.
        result = pipe(
            model_type=model_type,
            category=category_idx,
            image_garm=cloth_image,
            image_vton=model_image,
            mask=None,
            image_ori=model_image,
            num_samples=int(n_samples),
            num_steps=int(n_steps),
            guidance_scale=float(guidance_scale),
            seed=int(seed),
        )
    except Exception as e:
        raise gr.Error(f"Inference failed: {e}")

    # result is expected to be a list of PIL Images; normalise to a list.
    if isinstance(result, (list, tuple)):
        return list(result)
    return [result]
# ── Gradio UI ─────────────────────────────────────────────────────────────────
# Declarative layout: the order components are created inside each Row/Column
# context manager is the order they render in.
with gr.Blocks(title="OOTDiffusion Virtual Try-On", theme=gr.themes.Soft()) as demo:

    gr.Markdown(
        """
        # 👗 OOTDiffusion — Virtual Try-On
        **[AAAI 2025]** Upload a *model photo* and a *garment image*, choose settings, and click **Run Try-On**.
        > ⚠️ Non-commercial use only (CC-BY-NC-SA-4.0)
        """
    )

    with gr.Row():
        # ── Left column: inputs ───────────────────────────────────────────────
        # type="pil" means run_tryon normally receives PIL Images (it also
        # tolerates numpy arrays defensively).
        with gr.Column(scale=1):
            model_img = gr.Image(
                label="Model Image (person)",
                type="pil",
                height=400,
            )
            cloth_img = gr.Image(
                label="Garment Image (clothing)",
                type="pil",
                height=400,
            )

        # ── Middle column: settings ───────────────────────────────────────────
        with gr.Column(scale=1):
            model_type = gr.Radio(
                choices=["hd", "dc"],
                value="hd",
                label="Model Type",
                info="hd = half-body (VITON-HD) | dc = full-body (Dress Code)",
            )
            category = gr.Dropdown(
                choices=list(CATEGORY_MAP.keys()),
                value="Upper-body",
                label="Garment Category",
                info="Only used when Model Type is 'dc'",
            )
            n_samples = gr.Slider(
                minimum=1, maximum=4, step=1, value=1,
                label="Number of Samples",
            )
            n_steps = gr.Slider(
                minimum=10, maximum=40, step=5, value=20,
                label="Denoising Steps",
                info="More steps = better quality but slower",
            )
            guidance_scale = gr.Slider(
                minimum=1.0, maximum=5.0, step=0.5, value=2.0,
                label="Guidance Scale",
            )
            # precision=0 makes Gradio deliver the seed as an integer.
            seed = gr.Number(
                value=42,
                label="Seed (-1 = random)",
                precision=0,
            )

            run_btn = gr.Button("🚀 Run Try-On", variant="primary")

        # ── Right column: outputs ─────────────────────────────────────────────
        # run_tryon returns a list of images, which Gallery renders in a grid.
        with gr.Column(scale=1):
            output_gallery = gr.Gallery(
                label="Try-On Results",
                columns=2,
                height=500,
                object_fit="contain",
            )

    gr.Markdown(
        """
        ### Tips
        - **HD model**: best for upper-body garments on half-body photos
        - **DC model**: supports upper-body / lower-body / dress on full-body photos
        - Increasing **steps** to 30–40 noticeably improves quality
        - Set **seed = -1** for random results each run
        """
    )

    # ── Wire up the button ────────────────────────────────────────────────────
    # Input order must match run_tryon's parameter order.
    run_btn.click(
        fn=run_tryon,
        inputs=[
            model_img,
            cloth_img,
            model_type,
            category,
            n_samples,
            n_steps,
            guidance_scale,
            seed,
        ],
        outputs=output_gallery,
    )
# ── Launch ────────────────────────────────────────────────────────────────────
# Start the Gradio server only when executed as a script (not when imported).
if __name__ == "__main__":
    demo.launch()