Upload app.py
Browse files
app.py
CHANGED
|
@@ -1,50 +1,54 @@
|
|
| 1 |
-
|
| 2 |
import cv2
|
| 3 |
import numpy as np
|
| 4 |
from PIL import Image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
-
#
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
#
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
#
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
# Dummy overlay: just blend human + garment
|
| 23 |
-
output = cv2.addWeighted(human, 0.7, garment, 0.3, 0)
|
| 24 |
|
| 25 |
# Convert back to PIL
|
| 26 |
-
|
| 27 |
-
return Image.fromarray(
|
| 28 |
-
|
| 29 |
-
#
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
tryon_btn.click(
|
| 45 |
-
fn=vton_tryon,
|
| 46 |
-
inputs=[human_img, garment_img, auto_mask, auto_resize, description],
|
| 47 |
-
outputs=output_img
|
| 48 |
-
)
|
| 49 |
-
|
| 50 |
-
demo.launch()
|
|
|
|
| 1 |
+
# app.py
|
| 2 |
import cv2
|
| 3 |
import numpy as np
|
| 4 |
from PIL import Image
|
| 5 |
+
import gradio as gr
|
| 6 |
+
|
| 7 |
+
def vton_tryon(human: Image.Image, garment: Image.Image) -> Image.Image:
    """Paste a garment image onto a human image as a naive try-on preview.

    The garment is scaled to roughly half the human image's width (aspect
    ratio preserved), positioned top-center about one quarter of the way
    down (rough chest area), and composited with a binary mask so that
    near-black garment pixels stay transparent.

    Args:
        human: Person photo (any PIL mode; normalized to RGB internally).
        garment: Garment photo (any PIL mode; normalized to RGB internally).

    Returns:
        The composited preview as a PIL RGB image.

    Raises:
        ValueError: If either input image is missing.
    """
    # Gradio passes None when the user submits without uploading an image;
    # fail with a clear message instead of an opaque AttributeError.
    if human is None or garment is None:
        raise ValueError("Both a human image and a garment image are required.")

    # Normalize to RGB first: np.array() on an RGBA or grayscale PIL image
    # would not be a 3-channel RGB array, and COLOR_RGB2BGR would misbehave.
    human_bgr = cv2.cvtColor(np.array(human.convert("RGB")), cv2.COLOR_RGB2BGR)
    garment_bgr = cv2.cvtColor(np.array(garment.convert("RGB")), cv2.COLOR_RGB2BGR)

    # Resize garment to half the human image width, preserving aspect ratio.
    # max(1, ...) guards against a zero dimension, which cv2.resize rejects.
    scale_factor = human_bgr.shape[1] / garment_bgr.shape[1] * 0.5
    new_w = max(1, int(garment_bgr.shape[1] * scale_factor))
    new_h = max(1, int(garment_bgr.shape[0] * scale_factor))
    garment_resized = cv2.resize(garment_bgr, (new_w, new_h), interpolation=cv2.INTER_AREA)

    # Position: top-center horizontally, one quarter down — roughly the chest.
    x_offset = (human_bgr.shape[1] - garment_resized.shape[1]) // 2
    y_offset = human_bgr.shape[0] // 4

    # Clip the garment so it never extends past the human image; otherwise
    # the ROI slice below would be smaller than the garment and the masked
    # bitwise ops would fail on a shape mismatch (e.g. tall garment photos).
    h = min(garment_resized.shape[0], human_bgr.shape[0] - y_offset)
    w = min(garment_resized.shape[1], human_bgr.shape[1] - x_offset)
    garment_resized = garment_resized[:h, :w]

    # Binary garment mask: pixels with gray value <= 10 count as background.
    gray = cv2.cvtColor(garment_resized, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # Composite: keep human pixels where the mask is background and garment
    # pixels where it is foreground, then write the merged patch back.
    roi = human_bgr[y_offset:y_offset + h, x_offset:x_offset + w]
    human_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    garment_fg = cv2.bitwise_and(garment_resized, garment_resized, mask=mask)
    human_bgr[y_offset:y_offset + h, x_offset:x_offset + w] = cv2.add(human_bg, garment_fg)

    # Convert back to PIL (RGB) for Gradio.
    return Image.fromarray(cv2.cvtColor(human_bgr, cv2.COLOR_BGR2RGB))
|
| 40 |
+
|
| 41 |
+
# Gradio Interface
|
| 42 |
+
# Build the Gradio UI: two image uploads in, one composited image out.
_human_input = gr.Image(type="pil", label="Upload Human Image")
_garment_input = gr.Image(type="pil", label="Upload Garment Image")
_result_output = gr.Image(type="pil", label="Try-On Result")

iface = gr.Interface(
    fn=vton_tryon,
    inputs=[_human_input, _garment_input],
    outputs=_result_output,
    title="IDM-VTON 👕 Virtual Try-On",
    description="Upload a human image and a garment image to see the try-on result dynamically.",
)
|
| 52 |
+
|
| 53 |
+
# Launch the web app only when executed as a script, not when imported.
if __name__ == "__main__":
    iface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|