```py
from diffusers.utils import load_image, make_image_grid

# prepare image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
init_image = load_image(url)
init_image = init_image.resize((958, 960)) # resize to depth image dimensions
depth_image = load_image("https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png")
make_image_grid([init_image, depth_image], rows=1, cols=2)
```
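The depth map above is downloaded ready-made. As a hedged alternative sketch, you could estimate one directly from the initial image with a depth-estimation model (the `transformers` pipeline usage and the `Intel/dpt-large` checkpoint are assumptions, not part of this guide):

```py
# sketch: estimate a depth map from init_image instead of downloading one
# (the checkpoint is an assumption; any depth-estimation model works)
import numpy as np
from PIL import Image
from transformers import pipeline as transformers_pipeline

depth_estimator = transformers_pipeline("depth-estimation", model="Intel/dpt-large")
depth = depth_estimator(init_image)["depth"]            # single-channel PIL image
depth = np.array(depth)[:, :, None].repeat(3, axis=2)   # replicate to 3 channels
depth_image = Image.fromarray(depth)
```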
Load a ControlNet model conditioned on depth maps and the AutoPipelineForImage2Image:

```py
from diffusers import ControlNetModel, AutoPipelineForImage2Image
import torch

controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11f1p_sd15_depth", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
pipeline = AutoPipelineForImage2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove the following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
```
Now generate a new image conditioned on the depth map, initial image, and prompt:

```py
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image_control_net = pipeline(prompt, image=init_image, control_image=depth_image).images[0]
make_image_grid([init_image, depth_image, image_control_net], rows=1, cols=3)
```

*From left to right: initial image, depth image, ControlNet image.*
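You can also tune how strictly the output follows the depth map. As a hedged sketch, `controlnet_conditioning_scale` (a standard argument of the ControlNet pipelines; the values below are only illustrative) scales the conditioning strength:

```py
# sketch: weaken or strengthen the depth conditioning
# (the specific values are illustrative, not recommendations)
image_loose = pipeline(
    prompt, image=init_image, control_image=depth_image, controlnet_conditioning_scale=0.5
).images[0]
image_strict = pipeline(
    prompt, image=init_image, control_image=depth_image, controlnet_conditioning_scale=1.0
).images[0]
make_image_grid([image_loose, image_strict], rows=1, cols=2)
```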
"nitrosocke/elden-ring-diffusion", torch_dtype=torch.float16, |
) |
pipeline.enable_model_cpu_offload() |
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed |
pipeline.enable_xformers_memory_efficient_attention() |
prompt = "elden ring style astronaut in a jungle" # include the token "elden ring style" in the prompt |
negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy" |
image_elden_ring = pipeline(prompt, negative_prompt=negative_prompt, image=image_control_net, strength=0.45, guidance_scale=10.5).images[0] |
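The `strength=0.45` above keeps most of the ControlNet image's layout while restyling it; higher values add more noise and give the new checkpoint more freedom. A minimal sketch for comparing values, reusing the names defined above (the specific values are illustrative):

```py
# sketch: higher strength adds more noise to image_control_net,
# so the restyled result drifts further from its original layout
images = [
    pipeline(prompt, negative_prompt=negative_prompt, image=image_control_net,
             strength=s, guidance_scale=10.5).images[0]
    for s in (0.3, 0.45, 0.6)
]
make_image_grid(images, rows=1, cols=3)
```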
## Optimize

Running diffusion models is computationally expensive and intensive, but with a few optimization tricks, it is entirely possible to run them on consumer and free-tier GPUs. For example, you can use a more memory-efficient form of attention such as xFormers:
```diff
+ pipeline.enable_xformers_memory_efficient_attention()
```

With torch.compile, you can boost your inference speed even more by wrapping your UNet with it:

```py
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
```

To learn more, take a look at the Reduce memory usage and Torch 2.0 guides.
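Putting the pieces together, here is a minimal sketch of one optimized setup, assuming a GPU with enough memory to skip CPU offloading (offloading moves modules between devices, which may not combine cleanly with a compiled UNet):

```py
# a sketch of one optimized configuration, not the only valid one:
# fp16 weights, the whole pipeline on the GPU, and a compiled UNet
import torch
from diffusers import AutoPipelineForImage2Image

pipeline = AutoPipelineForImage2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
).to("cuda")
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)

# the first call is slow while the UNet compiles; later calls are faster
image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=init_image).images[0]
```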