# Hugging Face Space: Stable Diffusion text-to-image demo (Gradio app)
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
import os

# Hugging Face model repository to load.
MODEL_ID = "runwayml/stable-diffusion-v1-5"

# Prefer GPU when available; fall back to CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# float16 halves memory and speeds up inference on GPU; CPU needs float32.
TORCH_DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32

# Load the pipeline once at import time so every request reuses it.
print(f"Loading model on {DEVICE}...")
pipe = StableDiffusionPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=TORCH_DTYPE,
    use_safetensors=True,
)
pipe = pipe.to(DEVICE)
def generate_image(prompt, negative_prompt):
    """Generate an image from a text prompt using the module-level pipeline.

    Args:
        prompt: Text description of the desired image; an empty/falsy
            prompt short-circuits and returns None.
        negative_prompt: Concepts to steer generation away from.

    Returns:
        The first generated image on success, or None when the prompt is
        empty or the pipeline raises (the error is printed to stdout so
        the Gradio UI degrades gracefully instead of crashing).
    """
    if not prompt:
        return None
    try:
        # Run the diffusion pipeline; parameters match the original demo.
        result = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=30,
            guidance_scale=7.5,
        )
        return result.images[0]
    except Exception as e:
        # Broad catch is deliberate: any pipeline failure (OOM, NSFW
        # filter, etc.) should yield an empty output, not a stack trace.
        print(f"Error: {e}")
        return None
# Build the UI: prompt inputs on the left, generated image on the right.
with gr.Blocks(theme=gr.themes.Base()) as demo:
    gr.Markdown("# ✨ AI Image Generator")
    gr.Markdown("Enter a prompt and click generate to create an image.")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", placeholder="A majestic lion in the jungle...")
            neg_prompt = gr.Textbox(label="Negative Prompt", placeholder="blurry, low quality, distorted")
            btn = gr.Button("Generate", variant="primary")
        with gr.Column():
            output = gr.Image(label="Result")
    # Wire the button to the generator; None output simply clears the image.
    btn.click(fn=generate_image, inputs=[prompt, neg_prompt], outputs=output)

if __name__ == "__main__":
    demo.launch()