| import gradio as gr |
| import torch |
| from diffusers import StableDiffusionControlNetPipeline, ControlNetModel |
| from diffusers.utils import load_image |
| from controlnet_aux import OpenposeDetector |
| import numpy as np |
| from PIL import Image |
|
|
| |
# OpenPose ControlNet for SD 1.5 (fp16): conditions generation on a pose map.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_openpose",
    torch_dtype=torch.float16
)
# Stable Diffusion v1.5 pipeline wired to the ControlNet above, in fp16.
# NOTE(review): .to("cuda") hard-requires a CUDA GPU — this will raise on
# CPU-only hosts; confirm deployment target.
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float16
).to("cuda")


# OpenPose keypoint detector: turns an input photo into the skeleton/pose map
# that the ControlNet consumes.
pose_detector = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
|
|
def transfer_pose(pose_image, appearance_image, prompt="person"):
    """Detect the pose in *pose_image* and generate a new image in that pose.

    Args:
        pose_image: PIL image whose body pose is extracted (via OpenPose) and
            used as the ControlNet conditioning map.
        appearance_image: PIL image of the desired appearance.
            NOTE(review): currently UNUSED — this pipeline has no appearance
            conditioning (e.g. IP-Adapter or reference-only ControlNet), so
            the result's appearance comes solely from the text prompt. The
            parameter is kept so the existing UI wiring still works; wiring
            it in requires additional model components.
        prompt: Short description of the subject. Empty/whitespace values
            fall back to "person" so the prompt template stays well-formed.

    Returns:
        A ``(pose_map, generated_image)`` tuple of PIL images: the detected
        pose visualization and the generated result.

    Raises:
        ValueError: if no source pose image was provided.
    """
    # Guard: submitting the form without a source image would otherwise crash
    # deep inside the pose detector with an opaque error.
    if pose_image is None:
        raise ValueError("Please provide a source pose image.")

    # Empty prompt would yield the malformed "photo of , high quality".
    subject = (prompt or "").strip() or "person"

    # Photo -> OpenPose skeleton map used as the conditioning image.
    pose_img = pose_detector(pose_image)

    result = pipe(
        prompt=f"photo of {subject}, high quality",
        image=pose_img,
        controlnet_conditioning_scale=1.0,
        num_inference_steps=20,
        guidance_scale=7.0
    ).images[0]

    return pose_img, result
|
|
| |
# Gradio front end: two image inputs plus an optional subject prompt, with the
# detected pose map and the generated image as outputs.
_input_widgets = [
    gr.Image(label="Source Pose Image", type="pil"),
    gr.Image(label="Target Appearance Image", type="pil"),
    gr.Textbox(label="Prompt (optional)", value="person"),
]
_output_widgets = [
    gr.Image(label="Detected Pose"),
    gr.Image(label="Result"),
]
demo = gr.Interface(
    fn=transfer_pose,
    inputs=_input_widgets,
    outputs=_output_widgets,
    title="Pose Transfer Tool",
    description="Transfer pose from first image to generate a new image",
)
|
|
| demo.launch() |