| import gradio as gr |
| import os |
| import sys |
| from pathlib import Path |
| import os |
| import numpy as np |
| from gradio import * |
|
|
# Registry of Hugging Face diffusion models exposed in the UI dropdown.
# Each entry maps a human-readable display name to its hub repo id.
models = [
    {"name": name, "url": repo}
    for name, repo in [
        ("Stable Diffusion 2", "stabilityai/stable-diffusion-2-1"),
        ("stability AI", "stabilityai/stable-diffusion-2-1-base"),
        ("XL-Refiner-1.0", "stabilityai/stable-diffusion-xl-refiner-1.0"),
        ("Future Diffusion", "nitrosocke/Future-Diffusion"),
        ("JWST Deep Space Diffusion", "dallinmackay/JWST-Deep-Space-diffusion"),
        ("Robo Diffusion 3 Base", "nousr/robo-diffusion-2-base"),
        ("Robo Diffusion", "nousr/robo-diffusion"),
        ("Tron Legacy Diffusion", "dallinmackay/Tron-Legacy-diffusion"),
    ]
]

# Currently selected model; mutated by set_model() from the dropdown callback.
current_model = models[0]

# Prompt-expansion helper loaded from a community Space.
text_gen = gr.Interface.load("spaces/daspartho/prompt-extend")
|
|
# One inference client per registry entry, index-aligned with `models`
# so a dropdown index can pick the matching client out of this list.
models2 = [
    gr.Interface.load(f"models/{entry['url']}", live=True, preprocess=True)
    for entry in models
]
|
|
|
|
def text_it(inputs, text_gen=text_gen):
    """Expand a short prompt idea into a detailed prompt.

    `text_gen` defaults to the module-level prompt-extend client and is
    injectable mainly so it can be swapped out in tests.
    """
    generator = text_gen
    return generator(inputs)
|
|
def load_image(image_path):
    """Read an image file from disk.

    Returns the decoded image as a BGR numpy array, or None when the file
    is missing or unreadable (cv2.imread's convention).
    """
    # BUGFIX: cv2 is never imported at module level anywhere in this file,
    # so the original raised NameError on first call. Import it lazily here
    # to keep the module importable when opencv is absent and this helper
    # (which has no callers in the visible code) is unused.
    import cv2

    return cv2.imread(image_path)
|
|
def set_model(current_model_index):
    """Record the dropdown selection in the module-global `current_model`.

    Returns a Gradio update carrying the chosen model's display name.
    """
    global current_model
    current_model = models[current_model_index]
    return gr.update(value=current_model["name"])
|
|
|
|
def send_it(inputs, model_choice):
    """Generate an image by running `inputs` through the client at index
    `model_choice` in the module-level `models2` list."""
    return models2[model_choice](inputs)
|
|
|
|
with gr.Blocks(css='style.css') as myface:
    # Header placeholder — the original call carried no HTML content.
    gr.HTML()
    with gr.Row():
        with gr.Row():
            input_text = gr.Textbox(label="Prompt idea", placeholder="", lines=1)
            model_name1 = gr.Dropdown(
                label="Choose Model",
                choices=[m["name"] for m in models],
                type="index",  # callbacks receive the selection as an int index
                value=current_model["name"],
                interactive=True,
            )
        with gr.Row():
            see_prompts = gr.Button("Generate Prompts")
            run = gr.Button("Generate Images", variant="primary")

    with gr.Row():
        output1 = gr.Image(label="")
        output2 = gr.Image(label="")
        output3 = gr.Image(label="")
    with gr.Row():
        magic1 = gr.Textbox(label="Generated Prompt", lines=2)
        magic2 = gr.Textbox(label="Generated Prompt", lines=2)
        magic3 = gr.Textbox(label="Generated Prompt", lines=2)
    with gr.Row():
        output4 = gr.Image(label="")
        output5 = gr.Image(label="")
        output6 = gr.Image(label="")
    with gr.Row():
        magic4 = gr.Textbox(label="Generated Prompt", lines=2)
        magic5 = gr.Textbox(label="Generated Prompt", lines=2)
        magic6 = gr.Textbox(label="Generated Prompt", lines=2)

    # BUGFIX: set_model returns a single gr.update, but the original wired it
    # to all six image outputs — an output-arity mismatch that errors at
    # dispatch time. Route the single update back to the dropdown instead
    # (its value is re-set to the chosen display name, a valid choice).
    model_name1.change(set_model, inputs=model_name1, outputs=[model_name1])

    # Each prompt box drives its paired image; all six prompt boxes are
    # filled from the same idea text (prompt-extend is non-deterministic,
    # so the six results can differ).
    for magic, output in (
        (magic1, output1),
        (magic2, output2),
        (magic3, output3),
        (magic4, output4),
        (magic5, output5),
        (magic6, output6),
    ):
        run.click(send_it, inputs=[magic, model_name1], outputs=[output])
        see_prompts.click(text_it, inputs=[input_text], outputs=[magic])

myface.queue(concurrency_count=200)  # NOTE(review): gradio 3.x queue API — confirm installed version
myface.launch(inline=True, show_api=True, max_threads=400)
# BUGFIX: removed the trailing `demo.launch()` — `demo` was never defined, so
# it raised NameError once the blocking launch() above returned.