# tryone / app.py
# (Hugging Face Spaces upload banner, kept as comments so the file stays valid Python)
# fanboyd13's picture — Upload app.py
# e905fe4 verified
# app.py
import cv2
import numpy as np
from PIL import Image
import gradio as gr
def vton_tryon(human: Image.Image, garment: Image.Image) -> Image.Image:
    """Overlay a garment image onto a human image (naive paste, not a learned VTON).

    The garment is scaled to half the human image's width, centered
    horizontally, and pasted at roughly chest height (1/4 of the image
    height from the top). Near-black garment pixels (gray value <= 10)
    are treated as transparent background.

    Args:
        human: Person photo (any PIL mode; converted to RGB internally).
        garment: Garment photo, assumed on a dark background (any PIL mode).

    Returns:
        A new RGB ``PIL.Image`` with the same size as ``human``.
    """
    # Normalize modes first: gr.Image(type="pil") may hand us RGBA or
    # grayscale images, and cv2.COLOR_RGB2BGR requires exactly 3 channels.
    human_bgr = cv2.cvtColor(np.array(human.convert("RGB")), cv2.COLOR_RGB2BGR)
    garment_bgr = cv2.cvtColor(np.array(garment.convert("RGB")), cv2.COLOR_RGB2BGR)

    # Scale garment so it occupies half of the human image's width.
    scale_factor = human_bgr.shape[1] / garment_bgr.shape[1] * 0.5
    new_w = max(1, int(garment_bgr.shape[1] * scale_factor))
    new_h = max(1, int(garment_bgr.shape[0] * scale_factor))
    garment_resized = cv2.resize(garment_bgr, (new_w, new_h),
                                 interpolation=cv2.INTER_AREA)

    # Position: horizontally centered, vertically at ~1/4 height (chest area).
    x_offset = (human_bgr.shape[1] - garment_resized.shape[1]) // 2
    y_offset = human_bgr.shape[0] // 4

    # Bug fix: clip the garment so the ROI never extends past the human
    # image's bottom/right edge. The original code crashed with an OpenCV
    # shape-mismatch assertion in cv2.bitwise_and whenever the resized
    # garment overflowed the frame (NumPy silently truncates the ROI slice,
    # but the mask keeps its full size).
    h = min(garment_resized.shape[0], human_bgr.shape[0] - y_offset)
    w = min(garment_resized.shape[1], human_bgr.shape[1] - x_offset)
    if h <= 0 or w <= 0:
        # Degenerate geometry — nothing to paste; return the human unchanged.
        return Image.fromarray(cv2.cvtColor(human_bgr, cv2.COLOR_BGR2RGB))
    garment_resized = garment_resized[:h, :w]

    # Binary mask: garment pixels brighter than the near-black background.
    gray = cv2.cvtColor(garment_resized, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # Composite: keep the human where the mask is off, garment where it is on.
    roi = human_bgr[y_offset:y_offset + h, x_offset:x_offset + w]
    human_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    garment_fg = cv2.bitwise_and(garment_resized, garment_resized, mask=mask)
    human_bgr[y_offset:y_offset + h, x_offset:x_offset + w] = cv2.add(human_bg,
                                                                      garment_fg)

    return Image.fromarray(cv2.cvtColor(human_bgr, cv2.COLOR_BGR2RGB))
# Gradio Interface
iface = gr.Interface(
fn=vton_tryon,
inputs=[
gr.Image(type="pil", label="Upload Human Image"),
gr.Image(type="pil", label="Upload Garment Image")
],
outputs=gr.Image(type="pil", label="Try-On Result"),
title="IDM-VTON 👕 Virtual Try-On",
description="Upload a human image and a garment image to see the try-on result dynamically."
)
if __name__ == "__main__":
iface.launch()