| import gradio as gr |
| from gradio_client import Client, handle_file |
| import re |
|
|
| |
# API client for the Omni Image Editor Space: handles text-to-image,
# prompt-based editing, and upscaling endpoints.
client = Client("selfit-camera/Omni-Image-Editor")

# API client for the LTX-2-TURBO Space: start/end-frame video generation.
video_client = Client("alexnasa/ltx-2-TURBO")

# API client for the Omni Video Factory Space: multi-scene video generation.
# NOTE: the original file instantiated this client twice; each Client()
# opens a connection to the remote Space, so the duplicate was removed.
omni_video_client = Client("FrameAI4687/Omni-Video-Factory")
|
|
| |
def generate_image(prompt):
    """
    Generate an image from a text prompt using the Omni Image Editor API.

    Args:
        prompt (str): Text description of the image to generate.

    Returns:
        str: URL of the generated image, a placeholder URL when no image
            could be extracted from the response, or an error message.
    """
    try:
        # Validate input before hitting the remote API, consistent with the
        # other entry points (edit_image / generate_video) in this module.
        if not prompt or not prompt.strip():
            return "Please enter an image prompt"

        result = client.predict(
            prompt=prompt,
            aspect_ratio="16:9",
            api_name="/text_to_image_interface"
        )

        # The endpoint returns a tuple whose first element is an HTML <img>
        # snippet. Guard the shape (as edit_image/upscale_image already do)
        # instead of assuming result[0] exists.
        if isinstance(result, tuple) and len(result) > 0:
            html_string = result[0]
            match = re.search(r"src='([^']+)'", html_string)
            if match:
                image_url = match.group(1)
                return image_url
            # No src= attribute found in the returned HTML.
            return "https://via.placeholder.com/400x200?text=Error:Image+Not+Found"
        return str(result)
    except Exception as e:
        # Surface the failure to the UI as a plain string rather than crashing.
        return f"Error generating image: {str(e)}"
|
|
| |
def edit_image(input_image, edit_prompt):
    """
    Apply a text-driven edit to an uploaded image via the Omni Image Editor API.

    Args:
        input_image (str): Path to the image file to edit.
        edit_prompt (str): Description of the edits to apply.

    Returns:
        str: URL of the edited image, a placeholder URL when no image could
            be extracted from the response, or an error message.
    """
    # Guard clause: nothing to do without an uploaded image.
    if input_image is None:
        return "Please upload an image first"

    try:
        response = client.predict(
            input_image=handle_file(input_image),
            prompt=edit_prompt,
            api_name="/edit_image_interface"
        )

        # Anything other than a non-empty tuple is returned verbatim.
        if not (isinstance(response, tuple) and len(response) > 0):
            return str(response)

        # The first tuple element is an HTML <img> snippet; pull out its src.
        found = re.search(r"src='([^']+)'", response[0])
        if found is None:
            return "https://via.placeholder.com/400x200?text=Error:Image+Not+Found"
        return found.group(1)

    except Exception as e:
        return f"Error editing image: {str(e)}"
|
|
| |
def upscale_image(input_image):
    """
    Upscale an image to higher resolution via the Omni Image Editor API.

    Args:
        input_image (str): Path to the image file to upscale.

    Returns:
        str: URL of the upscaled image, a placeholder URL when no image could
            be extracted from the response, or an error message.
    """
    # Guard clause: an image must be provided before calling the API.
    if input_image is None:
        return "Please upload an image first"

    try:
        response = client.predict(
            input_image=handle_file(input_image),
            api_name="/image_upscale_interface"
        )

        # Non-tuple (or empty) responses are passed through as text.
        if not (isinstance(response, tuple) and len(response) > 0):
            return str(response)

        # Extract the src URL from the HTML <img> snippet in element 0.
        found = re.search(r"src='([^']+)'", response[0])
        if found is None:
            return "https://via.placeholder.com/400x200?text=Error:Image+Not+Found"
        return found.group(1)

    except Exception as e:
        return f"Error upscaling image: {str(e)}"
|
|
| |
def generate_video(first_frame, end_frame, prompt, duration, height, width, enhance_prompt, seed, randomize_seed, camera_lora):
    """
    Generate a video from start and end frames using the LTX-2-TURBO API.

    Args:
        first_frame (str): Path to the starting frame image.
        end_frame (str): Path to the ending frame image.
        prompt (str): Text description of the video to generate.
        duration (int): Duration of the video in seconds.
        height (int): Height of the video in pixels.
        width (int): Width of the video in pixels.
        enhance_prompt (bool): Whether to enhance the prompt with AI.
        seed (int): Random seed for reproducibility.
        randomize_seed (bool): Whether to randomize the seed.
        camera_lora (str): Camera LoRA setting.

    Returns:
        str: Path to the generated video, or an error/validation message.
    """
    try:
        if first_frame is None or end_frame is None:
            return "Please upload both start and end frame images"

        # A Gradio Textbox can deliver None; the original `prompt.strip()`
        # raised AttributeError here instead of showing the validation message.
        if not prompt or not prompt.strip():
            return "Please enter a video prompt"

        result = video_client.predict(
            first_frame=handle_file(first_frame),
            end_frame=handle_file(end_frame),
            prompt=prompt,
            duration=duration,
            input_video=None,
            generation_mode="Image-to-Video",
            enhance_prompt=enhance_prompt,
            seed=seed,
            randomize_seed=randomize_seed,
            height=height,
            width=width,
            camera_lora=camera_lora,
            audio_path=None,
            api_name="/generate_video"
        )

        # The endpoint returns the video path directly; empty/falsy means failure.
        if result:
            return result
        return "Error: No video generated"

    except Exception as e:
        return f"Error generating video: {str(e)}"
|
|
| |
def generate_omni_video(base_prompt, scene_count, seconds_per_scene):
    """
    Generate a multi-scene video using the Omni Video Factory API.

    Args:
        base_prompt (str): Base prompt describing the video scene.
        scene_count (int | float | str): Number of scenes to generate.
        seconds_per_scene (int | float | str): Duration of each scene in seconds.

    Returns:
        str: Path to the generated video, or an error/validation message.
    """
    try:
        if not base_prompt or not base_prompt.strip():
            return "Please enter a video prompt"

        # Gradio sliders deliver floats (e.g. 3.0); normalize through int()
        # once so the API receives "3" rather than "3.0".
        scenes = int(scene_count) if scene_count else 0
        per_scene = int(seconds_per_scene) if seconds_per_scene else 0

        if scenes < 1:
            return "Please enter a valid scene count (minimum 1)"

        if per_scene < 1:
            return "Please enter a valid duration per scene (minimum 1 second)"

        # The endpoint's parameters are string-typed; send the normalized ints.
        result = omni_video_client.predict(
            base_prompt=base_prompt,
            scene_count=str(scenes),
            seconds_per_scene=str(per_scene),
            api_name="/_generate_i2v_scenes"
        )

        if result:
            return result
        return "Error: No video generated"

    except Exception as e:
        return f"Error generating video: {str(e)}"
|
|
| |
# ---------------------------------------------------------------------------
# Gradio UI: five tabs that call the helper functions defined above.
# Definition order matters — each component variable must exist before the
# .click() wiring that references it, and `demo` is launched at module exit.
# ---------------------------------------------------------------------------
with gr.Blocks(
    title='Omni Image Editor with Gradio',
    theme=gr.themes.Soft()
) as demo:
    gr.Markdown("# Omni Image Editor Studio")
    gr.Markdown("Generate images from text descriptions or edit existing images with AI-powered tools.")

    with gr.Tabs():

        # --- Tab 1: text-to-image generation -------------------------------
        with gr.TabItem("Text to Image Generator"):
            gr.Markdown("### Generate Images from Text")
            gr.Markdown("Describe the image you want to generate in detail for best results.")

            with gr.Row():
                with gr.Column():
                    prompt_input = gr.Textbox(
                        label='Image Description',
                        placeholder='e.g., A futuristic city at sunset with flying cars, neon lights, cyberpunk style, high quality',
                        lines=3
                    )
                    generate_btn = gr.Button("🎨 Generate Image", variant="primary")

            # Output image; 'filepath' type lets the URL/path be displayed.
            generated_image = gr.Image(label='Generated Image', type='filepath')

            generate_btn.click(
                fn=generate_image,
                inputs=[prompt_input],
                outputs=[generated_image]
            )

        # --- Tab 2: prompt-based image editing ------------------------------
        with gr.TabItem("Image Editor"):
            gr.Markdown("### Edit Images with AI")
            gr.Markdown("Upload an image and describe the changes you want to make.")

            with gr.Row():
                with gr.Column():
                    input_image = gr.Image(
                        label='Upload Image',
                        type='filepath'
                    )

            with gr.Row():
                with gr.Column():
                    edit_prompt = gr.Textbox(
                        label='Edit Instructions',
                        placeholder='e.g., Change the sky to sunset colors, add stars, increase contrast',
                        lines=3
                    )
                    edit_btn = gr.Button("✨ Edit Image", variant="primary")

            edited_image = gr.Image(label='Edited Image', type='filepath')

            edit_btn.click(
                fn=edit_image,
                inputs=[input_image, edit_prompt],
                outputs=[edited_image]
            )

        # --- Tab 3: image upscaling -----------------------------------------
        with gr.TabItem("Image Upscaler"):
            gr.Markdown("### Upscale Images to Higher Resolution")
            gr.Markdown("Upload an image and enhance it to higher resolution using AI-powered upscaling.")

            with gr.Row():
                with gr.Column():
                    upscale_input = gr.Image(
                        label='Upload Image to Upscale',
                        type='filepath'
                    )
                    upscale_btn = gr.Button("⬆️ Upscale Image", variant="primary")

            upscaled_image = gr.Image(label='Upscaled Image', type='filepath')

            upscale_btn.click(
                fn=upscale_image,
                inputs=[upscale_input],
                outputs=[upscaled_image]
            )

        # --- Tab 4: start/end-frame video generation (LTX-2-TURBO) ----------
        with gr.TabItem("Video Generator"):
            gr.Markdown("### Generate Videos from Images")
            gr.Markdown("Upload start and end frame images and describe the motion you want to create.")

            with gr.Row():
                with gr.Column():
                    video_first_frame = gr.Image(
                        label='First Frame (Start Image)',
                        type='filepath'
                    )
                with gr.Column():
                    video_end_frame = gr.Image(
                        label='End Frame (Final Image)',
                        type='filepath'
                    )

            with gr.Row():
                with gr.Column():
                    video_prompt = gr.Textbox(
                        label='Video Description',
                        placeholder='e.g., Make this image come alive with cinematic motion, smooth camera pan, 4K quality',
                        lines=3
                    )

            # Size/duration controls; slider bounds mirror the API's limits.
            with gr.Row():
                with gr.Column():
                    video_duration = gr.Slider(
                        label='Duration (seconds)',
                        minimum=1,
                        maximum=10,
                        value=5,
                        step=1
                    )
                with gr.Column():
                    video_height = gr.Slider(
                        label='Height (pixels)',
                        minimum=256,
                        maximum=1024,
                        value=512,
                        step=64
                    )
                with gr.Column():
                    video_width = gr.Slider(
                        label='Width (pixels)',
                        minimum=256,
                        maximum=1024,
                        value=768,
                        step=64
                    )

            with gr.Row():
                with gr.Column():
                    video_enhance = gr.Checkbox(
                        label='Enhance Prompt with AI',
                        value=True
                    )
                with gr.Column():
                    video_randomize = gr.Checkbox(
                        label='Randomize Seed',
                        value=True
                    )
                with gr.Column():
                    # precision=0 forces an integer seed.
                    video_seed = gr.Number(
                        label='Seed',
                        value=10,
                        precision=0
                    )

            with gr.Row():
                with gr.Column():
                    video_camera = gr.Dropdown(
                        label='Camera LoRA',
                        choices=['No LoRA', 'Pan Left', 'Pan Right', 'Zoom In', 'Zoom Out', 'Rotate CW', 'Rotate CCW'],
                        value='No LoRA'
                    )

            with gr.Row():
                video_generate_btn = gr.Button("🎬 Generate Video", variant="primary", size='lg')

            generated_video = gr.Video(label='Generated Video')

            # Input order here must match generate_video's parameter order.
            video_generate_btn.click(
                fn=generate_video,
                inputs=[
                    video_first_frame,
                    video_end_frame,
                    video_prompt,
                    video_duration,
                    video_height,
                    video_width,
                    video_enhance,
                    video_seed,
                    video_randomize,
                    video_camera
                ],
                outputs=[generated_video]
            )

        # --- Tab 5: multi-scene video generation (Omni Video Factory) -------
        with gr.TabItem("Omni Video Factory"):
            gr.Markdown("### Generate Videos with Scene Sequences")
            gr.Markdown("Create multi-scene videos from text prompts using the Omni Video Factory API.")

            with gr.Row():
                with gr.Column():
                    omni_prompt = gr.Textbox(
                        label='Video Description',
                        placeholder='e.g., A drone shot over mountains with sunset, then a close-up of a waterfall, cinematic style',
                        lines=3
                    )

            with gr.Row():
                with gr.Column():
                    omni_scene_count = gr.Slider(
                        label='Number of Scenes',
                        minimum=1,
                        maximum=5,
                        value=1,
                        step=1
                    )
                with gr.Column():
                    omni_duration = gr.Slider(
                        label='Seconds Per Scene',
                        minimum=1,
                        maximum=10,
                        value=3,
                        step=1
                    )

            with gr.Row():
                omni_generate_btn = gr.Button("🎥 Generate Video Scenes", variant="primary", size='lg')

            omni_generated_video = gr.Video(label='Generated Video with Scenes')

            omni_generate_btn.click(
                fn=generate_omni_video,
                inputs=[
                    omni_prompt,
                    omni_scene_count,
                    omni_duration
                ],
                outputs=[omni_generated_video]
            )
|
|
| |
# Script entry point: launch the Gradio app with a public share link
# when the module is executed directly (not when imported).
if __name__ == "__main__":
    demo.launch(share=True)