| import gradio as gr |
| import os |
| from huggingface_hub import InferenceClient |
| import tempfile |
| import shutil |
| from pathlib import Path |
|
|
| |
# Shared Hugging Face inference client used by both generation handlers.
# Requests are routed through the fal-ai provider and billed to the
# "huggingface" organization.
# NOTE(review): HF_TOKEN may be unset, making api_key None — calls would then
# fail at request time. Confirm the deployment environment always sets it.
client = InferenceClient(
    provider="fal-ai",
    api_key=os.environ.get("HF_TOKEN"),
    bill_to="huggingface",
)
|
|
def text_to_video(prompt, duration=5, aspect_ratio="16:9", resolution="720p", profile: gr.OAuthProfile | None = None):
    """Generate a video from a text prompt via the shared InferenceClient.

    Args:
        prompt: Text description of the desired video.
        duration: Requested clip length in seconds (UI setting).
        aspect_ratio: Requested aspect ratio (UI setting).
        resolution: Requested resolution (UI setting).
        profile: OAuth profile injected by Gradio; None when not signed in.

    Returns:
        Tuple of (video_path, status_message); video_path is None on failure.

    NOTE(review): duration / aspect_ratio / resolution are collected from the
    UI but are NOT forwarded to client.text_to_video — confirm whether the
    fal-ai provider accepts these parameters before wiring them through.
    """
    try:
        # Require a signed-in user before spending inference credits.
        if profile is None:
            return None, "❌ Click Sign in with Hugging Face button to use this app for free"

        # Reject missing or whitespace-only prompts.
        if not prompt or not prompt.strip():
            return None, "Please enter a text prompt"

        # Returns the raw encoded video bytes.
        video = client.text_to_video(
            prompt,
            model="akhaliq/veo3.1-fast",
        )

        # Persist to a temp .mp4; delete=False so Gradio can still serve the
        # file after this handler returns.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_file:
            tmp_file.write(video)
            video_path = tmp_file.name

        # Only append an ellipsis when the prompt was actually truncated
        # (previously "..." was added even for short prompts).
        shown = prompt if len(prompt) <= 50 else prompt[:50] + "..."
        return video_path, f"✅ Video generated successfully from prompt: '{shown}'"

    except Exception as e:
        # Surface any failure as a status message instead of crashing the UI.
        return None, f"❌ Error generating video: {str(e)}"
|
|
def _image_to_bytes(image):
    """Normalize a handler image input (filepath / PIL image / array) to bytes.

    A string is treated as a path and read raw; anything else is encoded to
    PNG via PIL (arrays are converted with PILImage.fromarray first).
    """
    if isinstance(image, str):
        with open(image, "rb") as image_file:
            return image_file.read()

    # Imported lazily to keep startup light, matching the original code.
    import io
    from PIL import Image as PILImage

    pil_image = image if isinstance(image, PILImage.Image) else PILImage.fromarray(image)
    buffer = io.BytesIO()
    pil_image.save(buffer, format='PNG')
    return buffer.getvalue()


def image_to_video(image, prompt, duration=5, aspect_ratio="16:9", resolution="720p", profile: gr.OAuthProfile | None = None):
    """Generate a video that animates an input image according to a prompt.

    Args:
        image: Source image — a filepath string, PIL image, or numpy array.
        prompt: Description of the motion to apply to the image.
        duration: Requested clip length in seconds (UI setting).
        aspect_ratio: Requested aspect ratio (UI setting).
        resolution: Requested resolution (UI setting).
        profile: OAuth profile injected by Gradio; None when not signed in.

    Returns:
        Tuple of (video_path, status_message); video_path is None on failure.

    NOTE(review): duration / aspect_ratio / resolution are collected from the
    UI but are NOT forwarded to client.image_to_video — confirm whether the
    fal-ai provider accepts these parameters before wiring them through.
    """
    try:
        # Require a signed-in user before spending inference credits.
        if profile is None:
            return None, "❌ Click Sign in with Hugging Face button to use this app for free"

        if image is None:
            return None, "Please upload an image"

        # Reject missing or whitespace-only prompts.
        if not prompt or not prompt.strip():
            return None, "Please enter a prompt describing the motion"

        input_image = _image_to_bytes(image)

        # Returns the raw encoded video bytes.
        video = client.image_to_video(
            input_image,
            prompt=prompt,
            model="akhaliq/veo3.1-fast-image-to-video",
        )

        # Persist to a temp .mp4; delete=False so Gradio can still serve the
        # file after this handler returns.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_file:
            tmp_file.write(video)
            video_path = tmp_file.name

        # Only append an ellipsis when the prompt was actually truncated
        # (previously "..." was added even for short prompts).
        shown = prompt if len(prompt) <= 50 else prompt[:50] + "..."
        return video_path, f"✅ Video generated successfully with motion: '{shown}'"

    except Exception as e:
        # Surface any failure as a status message instead of crashing the UI.
        return None, f"❌ Error generating video: {str(e)}"
|
|
def clear_text_tab():
    """Reset the text-to-video tab: empty prompt, no video, empty status."""
    cleared_prompt = ""
    cleared_video = None
    cleared_status = ""
    return cleared_prompt, cleared_video, cleared_status
|
|
def clear_image_tab():
    """Reset the image-to-video tab: image, prompt, video output, and status."""
    cleared_image = None
    cleared_prompt = ""
    cleared_video = None
    cleared_status = ""
    return cleared_image, cleared_prompt, cleared_video, cleared_status
|
|
| |
# Stylesheet injected into the Blocks app via gr.Blocks(css=custom_css).
# .auth-warning styles the sign-in banner; .status-box pads the status fields;
# .header-link / .container appear unused by the visible markup — TODO confirm.
custom_css = """
.container {
    max-width: 1200px;
    margin: auto;
}
.header-link {
    text-decoration: none;
    color: #2196F3;
    font-weight: bold;
}
.header-link:hover {
    text-decoration: underline;
}
.status-box {
    padding: 10px;
    border-radius: 5px;
    margin-top: 10px;
}
.auth-warning {
    color: #ff6b00;
    font-weight: bold;
    text-align: center;
    margin: 1em 0;
    padding: 1em;
    background-color: #fff3e0;
    border-radius: 5px;
}
"""
|
|
| |
# ---------------------------------------------------------------------------
# UI layout. Two tabs (text->video, image->video) share the same structure:
# inputs plus advanced settings on the left, video preview on the right,
# with event wiring at the bottom.
# ---------------------------------------------------------------------------
with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="AI Video Generator") as demo:
    # Page header / branding.
    gr.Markdown(
        """
        # 🎬 AI Video Generator
        ### Generate stunning videos from text or animate your images with AI
        #### Powered by VEO 3.1 Fast Model | [Built with anycoder](https://huggingface.co/spaces/akhaliq/anycoder)
        """
    )

    # Sign-in banner (styled by .auth-warning in custom_css). The generation
    # handlers return an error message when no OAuth profile is present.
    gr.HTML(
        """
        <div class="auth-warning">
            ⚠️ You must Sign in with Hugging Face using the button below to use this app.
        </div>
        """
    )

    # Hugging Face OAuth sign-in. Presumably Gradio injects the resulting
    # gr.OAuthProfile into the handlers via their annotated `profile`
    # parameter — confirm against Gradio's OAuth docs.
    gr.LoginButton()

    with gr.Tabs() as tabs:
        # ----------------------- Tab 1: text -> video -----------------------
        with gr.Tab("📝 Text to Video", id=0):
            gr.Markdown("### Transform your text descriptions into dynamic videos")

            with gr.Row():
                with gr.Column(scale=1):
                    # Main prompt input.
                    text_prompt = gr.Textbox(
                        label="Text Prompt",
                        placeholder="Describe the video you want to create... (e.g., 'A young man walking on the street during sunset')",
                        lines=4,
                        max_lines=6
                    )

                    # NOTE(review): these advanced settings are passed to the
                    # handler but text_to_video never forwards them to the
                    # inference call — they currently have no effect.
                    with gr.Accordion("Advanced Settings", open=False):
                        text_duration = gr.Slider(
                            minimum=1,
                            maximum=10,
                            value=5,
                            step=1,
                            label="Duration (seconds)",
                            info="Video duration in seconds"
                        )
                        text_aspect_ratio = gr.Dropdown(
                            choices=["16:9", "9:16", "1:1", "4:3", "21:9"],
                            value="16:9",
                            label="Aspect Ratio",
                            info="Video aspect ratio"
                        )
                        text_resolution = gr.Dropdown(
                            choices=["480p", "720p", "1080p"],
                            value="720p",
                            label="Resolution",
                            info="Video resolution"
                        )

                    with gr.Row():
                        text_generate_btn = gr.Button("🎬 Generate Video", variant="primary", scale=2)
                        text_clear_btn = gr.ClearButton(value="🗑️ Clear", scale=1)

                    # Read-only status line updated by the handler.
                    text_status = gr.Textbox(
                        label="Status",
                        interactive=False,
                        visible=True,
                        elem_classes=["status-box"]
                    )

                with gr.Column(scale=1):
                    # Generated video preview / download.
                    text_video_output = gr.Video(
                        label="Generated Video",
                        autoplay=True,
                        show_download_button=True,
                        height=400
                    )

            # Clickable example prompts; each fills text_prompt only.
            gr.Examples(
                examples=[
                    ["A serene beach at sunset with gentle waves"],
                    ["A bustling city street with neon lights at night"],
                    ["A majestic eagle soaring through mountain peaks"],
                    ["An astronaut floating in space near the International Space Station"],
                    ["Cherry blossoms falling in slow motion in a Japanese garden"],
                ],
                inputs=text_prompt,
                label="Example Prompts"
            )

        # ----------------------- Tab 2: image -> video ----------------------
        with gr.Tab("🖼️ Image to Video", id=1):
            gr.Markdown("### Bring your static images to life with motion")

            with gr.Row():
                with gr.Column(scale=1):
                    # Source image; type="pil" so the handler receives a PIL image.
                    image_input = gr.Image(
                        label="Upload Image",
                        type="pil",
                        height=300
                    )

                    # Motion description applied to the uploaded image.
                    image_prompt = gr.Textbox(
                        label="Motion Prompt",
                        placeholder="Describe how the image should move... (e.g., 'The cat starts to dance')",
                        lines=3,
                        max_lines=5
                    )

                    # NOTE(review): as in the text tab, these settings reach the
                    # handler but are not forwarded to the inference call.
                    with gr.Accordion("Advanced Settings", open=False):
                        image_duration = gr.Slider(
                            minimum=1,
                            maximum=10,
                            value=5,
                            step=1,
                            label="Duration (seconds)",
                            info="Video duration in seconds"
                        )
                        image_aspect_ratio = gr.Dropdown(
                            choices=["16:9", "9:16", "1:1", "4:3", "21:9"],
                            value="16:9",
                            label="Aspect Ratio",
                            info="Video aspect ratio"
                        )
                        image_resolution = gr.Dropdown(
                            choices=["480p", "720p", "1080p"],
                            value="720p",
                            label="Resolution",
                            info="Video resolution"
                        )

                    with gr.Row():
                        image_generate_btn = gr.Button("🎬 Animate Image", variant="primary", scale=2)
                        image_clear_btn = gr.ClearButton(value="🗑️ Clear", scale=1)

                    # Read-only status line updated by the handler.
                    image_status = gr.Textbox(
                        label="Status",
                        interactive=False,
                        visible=True,
                        elem_classes=["status-box"]
                    )

                with gr.Column(scale=1):
                    # Generated video preview / download.
                    image_video_output = gr.Video(
                        label="Generated Video",
                        autoplay=True,
                        show_download_button=True,
                        height=400
                    )

            # Example motion prompts; image slot left as None so the user
            # supplies their own image.
            gr.Examples(
                examples=[
                    [None, "The person starts walking forward"],
                    [None, "The animal begins to run"],
                    [None, "Camera slowly zooms in while the subject smiles"],
                    [None, "The flowers sway gently in the breeze"],
                    [None, "The clouds move across the sky in time-lapse"],
                ],
                inputs=[image_input, image_prompt],
                label="Example Motion Prompts"
            )

    # Collapsible usage instructions shown below both tabs.
    with gr.Accordion("📖 How to Use", open=False):
        gr.Markdown(
            """
            ### Text to Video:
            1. Enter a detailed description of the video you want to create
            2. Optionally adjust advanced settings (duration, aspect ratio, resolution)
            3. Click "Generate Video" and wait for the AI to create your video
            4. Download or preview your generated video

            ### Image to Video:
            1. Upload an image you want to animate
            2. Describe the motion or action you want to add to the image
            3. Optionally adjust advanced settings
            4. Click "Animate Image" to bring your image to life
            5. Download or preview your animated video

            ### Tips for Better Results:
            - Be specific and descriptive in your prompts
            - For image-to-video, describe natural motions that fit the image
            - Use high-quality input images for better results
            - Experiment with different prompts to get the desired effect
            """
        )

    # ---------------------------- Event wiring ---------------------------
    # api_name=False / show_api=False keep the handlers out of the public API.
    # NOTE(review): queue=False runs these long generation calls outside
    # Gradio's queue — confirm this does not trip request timeouts.
    text_generate_btn.click(
        fn=text_to_video,
        inputs=[text_prompt, text_duration, text_aspect_ratio, text_resolution],
        outputs=[text_video_output, text_status],
        show_progress="full",
        queue=False,
        api_name=False,
        show_api=False
    )

    # Reset prompt, video preview, and status on the text tab.
    text_clear_btn.click(
        fn=clear_text_tab,
        inputs=[],
        outputs=[text_prompt, text_video_output, text_status],
        queue=False
    )

    image_generate_btn.click(
        fn=image_to_video,
        inputs=[image_input, image_prompt, image_duration, image_aspect_ratio, image_resolution],
        outputs=[image_video_output, image_status],
        show_progress="full",
        queue=False,
        api_name=False,
        show_api=False
    )

    # Reset image, prompt, video preview, and status on the image tab.
    image_clear_btn.click(
        fn=clear_image_tab,
        inputs=[],
        outputs=[image_input, image_prompt, image_video_output, image_status],
        queue=False
    )
|
|
| |
if __name__ == "__main__":
    # Launch locally with the API surface hidden; errors are shown in the UI.
    demo.launch(
        show_api=False,
        share=False,              # no public share tunnel
        show_error=True,          # surface handler exceptions to the user
        enable_monitoring=False,  # NOTE(review): confirm this kwarg exists in the pinned Gradio version
        quiet=True
    )