| |
| import cv2 |
| import numpy as np |
| from PIL import Image |
| import gradio as gr |
|
|
def vton_tryon(human: Image.Image, garment: Image.Image) -> Image.Image:
    """Naively composite a garment image onto a human image.

    The garment is scaled to half the human's width (aspect ratio preserved),
    anchored horizontally centered with its top edge at 1/4 of the human's
    height, and blended in via a brightness-threshold mask (near-black pixels
    are treated as transparent background).

    Args:
        human: Person photo (any PIL mode; converted to RGB internally).
        garment: Garment photo on a dark/black background (any PIL mode).

    Returns:
        A new RGB PIL image, same size as ``human``, with the garment pasted on.
    """
    # Normalize to 3-channel RGB first: Gradio may hand us RGBA or grayscale
    # PIL images, and cv2.COLOR_RGB2BGR assumes exactly three channels.
    human_bgr = cv2.cvtColor(np.array(human.convert("RGB")), cv2.COLOR_RGB2BGR)
    garment_bgr = cv2.cvtColor(np.array(garment.convert("RGB")), cv2.COLOR_RGB2BGR)

    # Target garment size: half the human's width, aspect ratio preserved.
    # max(1, ...) guards against a zero-sized resize target for tiny inputs.
    scale_factor = human_bgr.shape[1] / garment_bgr.shape[1] * 0.5
    new_w = max(1, int(garment_bgr.shape[1] * scale_factor))
    new_h = max(1, int(garment_bgr.shape[0] * scale_factor))

    # Anchor the garment's top edge at one quarter of the human's height.
    y_offset = human_bgr.shape[0] // 4

    # BUG FIX: the scale is derived from width only, so a tall garment could
    # end up taller than the space below the anchor (or, in edge cases, wider
    # than the human image). The ROI slice would then be smaller than the
    # resized garment and the masked blend below would raise a size-mismatch
    # error. Clamp the garment to the space actually available.
    new_h = min(new_h, human_bgr.shape[0] - y_offset)
    new_w = min(new_w, human_bgr.shape[1])
    garment_resized = cv2.resize(garment_bgr, (new_w, new_h), interpolation=cv2.INTER_AREA)

    # Center horizontally within the human image.
    x_offset = (human_bgr.shape[1] - new_w) // 2

    # Binary mask from brightness: pixels with gray value <= 10 are treated
    # as background and left showing the human underneath.
    gray = cv2.cvtColor(garment_resized, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # Masked blend: keep the human where the mask is off, the garment where on.
    roi = human_bgr[y_offset:y_offset + new_h, x_offset:x_offset + new_w]
    human_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    garment_fg = cv2.bitwise_and(garment_resized, garment_resized, mask=mask)
    human_bgr[y_offset:y_offset + new_h, x_offset:x_offset + new_w] = cv2.add(human_bg, garment_fg)

    # Convert back to RGB for PIL/Gradio.
    result = cv2.cvtColor(human_bgr, cv2.COLOR_BGR2RGB)
    return Image.fromarray(result)
|
|
| |
# Gradio UI wiring: two PIL image uploads in, one composited PIL image out.
_human_input = gr.Image(type="pil", label="Upload Human Image")
_garment_input = gr.Image(type="pil", label="Upload Garment Image")
_result_output = gr.Image(type="pil", label="Try-On Result")

iface = gr.Interface(
    fn=vton_tryon,
    inputs=[_human_input, _garment_input],
    outputs=_result_output,
    title="IDM-VTON 👕 Virtual Try-On",
    description="Upload a human image and a garment image to see the try-on result dynamically.",
)
|
|
# Launch the Gradio app only when this file is run as a script,
# not when it is imported as a module.
if __name__ == "__main__":
    iface.launch()