# Hugging Face Spaces page header captured by the scrape ("Spaces: Paused");
# not part of the program.
# --- Module setup: imports, device selection, lazy model handle ---
import os

import torch
import gradio as gr
from diffusers import AutoencoderTiny
from torchvision.transforms.functional import to_pil_image, center_crop, resize, to_tensor

# Prefer CUDA, then Apple Metal (MPS), then fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'

# Lazily-initialized AutoencoderTiny instance; populated on first use by get_model().
vae = None
def get_model():
    """Return the shared AutoencoderTiny VAE, loading it on first call.

    The model is cached in the module-level ``vae`` so repeated requests
    reuse a single instance already moved to ``device``.
    """
    global vae
    if vae is None:
        model_id = "madebyollin/taesd"
        # BUG FIX: the from_pretrained keyword is `use_safetensors`, not
        # `safetensors` — the original spelling was not honored.
        vae = AutoencoderTiny.from_pretrained(model_id, use_safetensors=True).to(device)
    return vae
def encode(image):
    """Encode a PIL image into TAESD's 4-channel latent, returned as a PIL image.

    The input is resized (shorter edge) and center-cropped to 512x512, encoded
    to a 64x64 4-channel latent, then scaled to [0, 255] bytes for display
    (the 4 latent channels render as RGBA).
    """
    model = get_model()  # renamed from `vae` to avoid shadowing the module global
    size = [512]  # single-value list: resize shorter edge to 512, then square crop
    processed = center_crop(resize(image, size), size)
    tensor = to_tensor(processed).unsqueeze(0).to(device)
    # FIX: inference only — without no_grad() every Gradio call built an
    # autograd graph, growing memory across requests.
    with torch.no_grad():
        latents = model.encoder(tensor)
        # scale_latents maps raw latents into [0, 1]; convert to displayable bytes.
        scaled = model.scale_latents(latents).mul_(255).round_().byte()
    return to_pil_image(scaled[0])
# Base URL of the demo example images hosted on the Hugging Face Hub.
path = 'https://huggingface.co/buckets/ciCic/demo-purposes/resolve/images'
# Default input image shown when the app loads.
astronaut = path + '/6.png'
def app():
    """Build the Gradio interface: a 512x512 image in, a 64x64 latent preview out."""
    input_image = gr.Image(type="pil", label='512x512', value=astronaut)
    # The TAESD latent has 4 channels, so the preview renders as RGBA.
    output_image = gr.Image(
        type="pil",
        image_mode="RGBA",
        label='64x64',
        height=256,
        width=256,
    )
    example_images = [astronaut, f"{path}/7.png", f"{path}/34.png"]
    return gr.Interface(
        encode,
        input_image,
        output_image,
        examples=example_images,
        flagging_mode='never',
        title='Image Encoder',
    )
def _main():
    """Entry point: build the Gradio app and serve it on all interfaces."""
    print("LAUNCHING")
    app().launch(server_name="0.0.0.0", server_port=7860, share=True)


if __name__ == "__main__":
    _main()