Stiphan committed on
Commit
a7d8a32
·
verified ·
1 Parent(s): 731fb43

Add AI Talking Photo (Lip-Sync) feature

Browse files

Generates lip-synced talking animation from a static face image and given text.

Files changed (1) hide show
  1. talking_photo.py +59 -0
talking_photo.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import requests
3
+ import tempfile
4
+ import os
5
+
6
# ---------- Hugging Face API Config ----------
# NOTE(review): never commit a real API key. Read it from the environment;
# the placeholder default preserves the original behavior when unset.
HF_API_TOKEN = os.getenv("HF_API_TOKEN", "YOUR_HF_API_KEY")
MODEL_URL = "https://api-inference.huggingface.co/models/DeepFloyd/face-animator"
HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}
10
+
11
def generate_talking_photo(image, text):
    """🖼️ Photo + Text → Lip-Sync Video.

    Uploads the face image and the given text to the Hugging Face inference
    endpoint and saves the returned MP4 to a temporary file.

    Args:
        image: PIL image of a face, or None.
        text: The line the face should speak.

    Returns:
        Path to the generated .mp4 file, or None on invalid input or any
        API/processing failure (errors are printed, not raised, so the
        Gradio UI stays responsive).
    """
    if image is None or not text.strip():
        return None

    temp_img_path = None
    try:
        # Persist the PIL image to disk so it can be streamed to the API.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_img:
            temp_img_path = temp_img.name
        image.save(temp_img_path)

        # Send the image as a multipart upload with the text as form data.
        # (The original also built an unused JSON payload dict and leaked
        # the open file handle; both are fixed here.)
        with open(temp_img_path, "rb") as img_file:
            resp = requests.post(
                MODEL_URL,
                headers=HEADERS,
                files={"image": img_file},
                data={"text": text},
            )

        if resp.status_code != 200:
            print("Error:", resp.text)
            return None

        # Write the returned video bytes to a temp .mp4 that Gradio can serve.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_video:
            temp_video.write(resp.content)
        return temp_video.name
    except Exception as e:
        # Deliberate broad catch: any failure surfaces as "no video" in the UI.
        print(f"Error: {e}")
        return None
    finally:
        # Always remove the temporary image, whatever the outcome.
        if temp_img_path and os.path.exists(temp_img_path):
            os.remove(temp_img_path)
44
+
45
# ---------- Gradio UI ----------
# Layout: image + text inputs side by side, a generate button, and the
# resulting video below. Clicking the button runs generate_talking_photo.
with gr.Blocks() as demo:
    gr.Markdown("<h2 style='text-align:center'>🗣️ Talking Photo — SaEdit MultiAi</h2>")

    with gr.Row():
        face_photo = gr.Image(type="pil", label="📷 Upload Face Photo")
        speech_text = gr.Textbox(label="💬 What should they say?", lines=2)

    generate_btn = gr.Button("🎬 Generate Talking Video")
    result_video = gr.Video(label="🖼️ Talking Animation")

    generate_btn.click(
        generate_talking_photo,
        inputs=[face_photo, speech_text],
        outputs=result_video,
    )

if __name__ == "__main__":
    demo.launch()