Wiuhh committed on
Commit
544c833
·
verified ·
1 Parent(s): 59d38c8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +571 -135
app.py CHANGED
@@ -1,154 +1,590 @@
1
  import gradio as gr
2
  import numpy as np
3
- import random
 
 
 
 
 
 
 
4
 
5
- # import spaces #[uncomment to use ZeroGPU]
6
- from diffusers import DiffusionPipeline
7
- import torch
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
- device = "cuda" if torch.cuda.is_available() else "cpu"
10
- model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
11
 
12
- if torch.cuda.is_available():
13
- torch_dtype = torch.float16
14
- else:
15
- torch_dtype = torch.float32
16
-
17
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
18
- pipe = pipe.to(device)
19
-
20
- MAX_SEED = np.iinfo(np.int32).max
21
- MAX_IMAGE_SIZE = 1024
22
-
23
-
24
- # @spaces.GPU #[uncomment to use ZeroGPU]
25
- def infer(
26
- prompt,
27
- negative_prompt,
28
- seed,
29
- randomize_seed,
30
- width,
31
- height,
32
- guidance_scale,
33
- num_inference_steps,
34
- progress=gr.Progress(track_tqdm=True),
35
- ):
36
- if randomize_seed:
37
- seed = random.randint(0, MAX_SEED)
38
-
39
- generator = torch.Generator().manual_seed(seed)
40
-
41
- image = pipe(
42
- prompt=prompt,
43
- negative_prompt=negative_prompt,
44
- guidance_scale=guidance_scale,
45
- num_inference_steps=num_inference_steps,
46
- width=width,
47
- height=height,
48
- generator=generator,
49
- ).images[0]
50
-
51
- return image, seed
52
-
53
-
54
- examples = [
55
- "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
56
- "An astronaut riding a green horse",
57
- "A delicious ceviche cheesecake slice",
58
- ]
59
-
60
- css = """
61
- #col-container {
62
- margin: 0 auto;
63
- max-width: 640px;
64
- }
65
- """
66
-
67
- with gr.Blocks(css=css) as demo:
68
- with gr.Column(elem_id="col-container"):
69
- gr.Markdown(" # Text-to-Image Gradio Template")
70
-
71
- with gr.Row():
72
- prompt = gr.Text(
73
- label="Prompt",
74
- show_label=False,
75
- max_lines=1,
76
- placeholder="Enter your prompt",
77
- container=False,
78
  )
79
-
80
- run_button = gr.Button("Run", scale=0, variant="primary")
81
-
82
- result = gr.Image(label="Result", show_label=False)
83
-
84
- with gr.Accordion("Advanced Settings", open=False):
85
- negative_prompt = gr.Text(
86
- label="Negative prompt",
87
- max_lines=1,
88
- placeholder="Enter a negative prompt",
89
- visible=False,
90
  )
 
 
 
 
91
 
92
- seed = gr.Slider(
93
- label="Seed",
94
- minimum=0,
95
- maximum=MAX_SEED,
96
- step=1,
97
- value=0,
 
 
 
 
 
 
 
 
 
 
 
 
98
  )
99
-
100
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
101
-
 
 
 
 
 
 
 
 
 
 
 
 
 
102
  with gr.Row():
103
- width = gr.Slider(
104
- label="Width",
105
- minimum=256,
106
- maximum=MAX_IMAGE_SIZE,
107
- step=32,
108
- value=1024, # Replace with defaults that work for your model
109
  )
110
-
111
- height = gr.Slider(
112
- label="Height",
113
- minimum=256,
114
- maximum=MAX_IMAGE_SIZE,
115
- step=32,
116
- value=1024, # Replace with defaults that work for your model
117
  )
118
-
119
  with gr.Row():
120
- guidance_scale = gr.Slider(
121
- label="Guidance scale",
122
- minimum=0.0,
123
- maximum=10.0,
124
- step=0.1,
125
- value=0.0, # Replace with defaults that work for your model
126
  )
127
-
128
- num_inference_steps = gr.Slider(
129
- label="Number of inference steps",
130
- minimum=1,
131
- maximum=50,
132
- step=1,
133
- value=2, # Replace with defaults that work for your model
134
  )
135
-
136
- gr.Examples(examples=examples, inputs=[prompt])
137
- gr.on(
138
- triggers=[run_button.click, prompt.submit],
139
- fn=infer,
140
- inputs=[
141
- prompt,
142
- negative_prompt,
143
- seed,
144
- randomize_seed,
145
- width,
146
- height,
147
- guidance_scale,
148
- num_inference_steps,
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  ],
150
- outputs=[result, seed],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
151
  )
152
 
153
  if __name__ == "__main__":
154
- demo.launch()
 
1
  import gradio as gr
2
  import numpy as np
3
+ import cv2
4
+ import os
5
+ import tempfile
6
+ import time
7
+ from PIL import Image, ImageDraw, ImageFont
8
+ import textwrap
9
+ from typing import Tuple, List
10
+ import colorsys
11
 
12
class CPUVideoGenerator:
    """Generate short MP4 clips (text animations and abstract patterns) entirely on CPU.

    Each style renderer writes BGR frames to an ``cv2.VideoWriter``.  Frames are
    composed as numpy arrays (backgrounds) converted to PIL images (text), because
    OpenCV drawing primitives only accept numpy arrays while Pillow owns font
    rendering.  (The previous revision called ``cv2.rectangle`` directly on PIL
    ``Image`` objects, which raises ``TypeError`` at the first frame.)
    """

    def __init__(self):
        # Per-instance scratch directory; output files are named by timestamp.
        self.temp_dir = tempfile.mkdtemp()

    def generate_text_video(self, prompt: str, duration: int = 5, fps: int = 24,
                            style: str = "typewriter", width: int = 512, height: int = 512) -> str:
        """Render *prompt* as an animated text video and return the MP4 path.

        ``style`` is one of ``typewriter``, ``fade``, ``slide``, ``glitch``,
        ``wave``; unknown styles fall back to ``typewriter``.  Raises whatever
        the renderer raises; the writer is always released.
        """
        output_path = os.path.join(self.temp_dir, f"video_{int(time.time())}.mp4")
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
        total_frames = duration * fps

        renderers = {
            "typewriter": self._create_typewriter_animation,
            "fade": self._create_fade_animation,
            "slide": self._create_slide_animation,
            "glitch": self._create_glitch_animation,
            "wave": self._create_wave_animation,
        }
        try:
            renderer = renderers.get(style, self._create_typewriter_animation)
            renderer(out, prompt, total_frames, width, height, fps)
        finally:
            # Release even on failure so the partially-written file is closed.
            out.release()
        return output_path

    def _create_typewriter_animation(self, out, prompt: str, total_frames: int,
                                     width: int, height: int, fps: int):
        """Typewriter effect: characters appear over ~70% of the clip with a blinking cursor."""
        margin = 50
        font_size = max(20, min(60, (width - 2 * margin) // 20))
        font = self._get_font(font_size)

        lines = textwrap.wrap(prompt, width=30)
        line_height = font_size + 10
        start_y = (height - len(lines) * line_height) // 2

        # Static vertical gradient background, built once as a numpy array and
        # copied per frame (so stale text/cursors never accumulate).
        rows = np.arange(height, dtype=np.float64)
        gray = (20 + 10 * rows / height).astype(np.uint8)
        row_colors = np.stack([gray, gray, gray + 10], axis=-1)          # (H, 3)
        background = np.repeat(row_colors[:, None, :], width, axis=1)    # (H, W, 3)

        total_chars = sum(len(line) for line in lines)
        # Integer characters revealed per frame; typing fills ~70% of the clip.
        chars_per_frame = max(1, int(total_chars / (total_frames * 0.7))) if total_frames else 1

        char_count = 0
        for frame_num in range(total_frames):
            img = Image.fromarray(background.copy())
            draw = ImageDraw.Draw(img)

            current_chars = min(char_count, total_chars)
            drawn_chars = 0
            y_pos = start_y
            for line in lines:
                if drawn_chars >= current_chars:
                    break
                line_end = min(len(line), current_chars - drawn_chars)
                if line_end > 0:
                    text = line[:line_end]
                    # Drop shadow, then foreground text.
                    draw.text((margin + 2, y_pos + 2), text, font=font, fill=(0, 0, 0))
                    draw.text((margin, y_pos), text, font=font, fill=(200, 200, 220))
                    # Blinking cursor while this line is still being typed.
                    if drawn_chars + line_end < current_chars and frame_num % 10 < 5:
                        text_width = font.getlength(text)
                        draw.line([(margin + text_width, y_pos),
                                   (margin + text_width, y_pos + font_size)],
                                  fill=(255, 255, 100), width=2)
                drawn_chars += len(line)
                y_pos += line_height

            # Slower blinking cursor after typing completes (guard empty prompt).
            if lines and char_count >= total_chars and frame_num % 20 < 10:
                last_line_y = start_y + (len(lines) - 1) * line_height
                text_width = font.getlength(lines[-1])
                draw.line([(margin + text_width, last_line_y),
                           (margin + text_width, last_line_y + font_size)],
                          fill=(255, 255, 100), width=2)

            out.write(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
            if char_count < total_chars:
                char_count += chars_per_frame

    def _create_fade_animation(self, out, prompt: str, total_frames: int,
                               width: int, height: int, fps: int):
        """Fade in (first 30%), hold, fade out (last 30%) over a hue-cycling gradient."""
        margin = 50
        font_size = max(20, min(50, (width - 2 * margin) // 15))
        font = self._get_font(font_size)

        lines = textwrap.wrap(prompt, width=25)
        line_height = font_size + 15
        start_y = (height - len(lines) * line_height) // 2

        for frame_num in range(total_frames):
            progress = frame_num / total_frames
            if progress < 0.3:          # fade in
                alpha = progress / 0.3
            elif progress > 0.7:        # fade out
                alpha = (1.0 - progress) / 0.3
            else:                       # full visibility
                alpha = 1.0

            # Animated gradient background, one HSV sample per row.
            frame_arr = np.empty((height, width, 3), dtype=np.uint8)
            for y in range(height):
                hue = (frame_num * 2 + y * 0.5) % 360
                rgb = colorsys.hsv_to_rgb(hue / 360, 0.3, 0.2)
                frame_arr[y, :] = [int(c * 255) for c in rgb]

            img = Image.fromarray(frame_arr)
            draw = ImageDraw.Draw(img)
            text_color = tuple(int(200 * alpha) for _ in range(3))
            y_pos = start_y
            for line in lines:
                draw.text((margin, y_pos), line, font=font, fill=text_color)
                y_pos += line_height

            out.write(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))

    def _create_slide_animation(self, out, prompt: str, total_frames: int,
                                width: int, height: int, fps: int):
        """Slide text in from the right edge to centre and back out (triangle wave)."""
        font_size = max(25, min(60, (width - 2 * 50) // 12))
        font = self._get_font(font_size)

        lines = textwrap.wrap(prompt, width=20)
        line_height = font_size + 15

        for frame_num in range(total_frames):
            # Gently pulsing diagonal background.
            frame_arr = np.empty((height, width, 3), dtype=np.uint8)
            for y in range(height):
                intensity = 40 + int(20 * np.sin((frame_num + y) * 0.05))
                frame_arr[y, :] = (intensity, intensity - 10, intensity + 10)

            img = Image.fromarray(frame_arr)
            draw = ImageDraw.Draw(img)

            # Triangle wave: x_offset is `width` at the clip ends, 0 mid-clip.
            slide_progress = frame_num / total_frames
            x_offset = int(width * (1 - 2 * abs(slide_progress - 0.5)))

            y_pos = (height - len(lines) * line_height) // 2
            for line in lines:
                draw.text((x_offset, y_pos), line, font=font, fill=(220, 200, 180))
                y_pos += line_height

            out.write(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))

    def _create_glitch_animation(self, out, prompt: str, total_frames: int,
                                 width: int, height: int, fps: int):
        """Glitch effect: RGB-offset text layers over per-frame noise with scan bars."""
        font_size = max(20, min(50, (width - 2 * 50) // 15))
        font = self._get_font(font_size)

        lines = textwrap.wrap(prompt, width=25)
        line_height = font_size + 10
        start_y = (height - len(lines) * line_height) // 2

        # Four jittered colour layers give the chromatic-aberration look.
        offsets = [(0, 0), (2, 0), (-2, 2), (1, -1)]
        layer_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 255)]

        for frame_num in range(total_frames):
            np_img = np.random.randint(0, 30, (height, width, 3), dtype=np.uint8)

            # Horizontal glitch bars every 5th frame.  Drawn into the array
            # *before* converting to PIL (the previous revision drew them after
            # Image.fromarray copied the buffer, so they were silently lost).
            if frame_num % 5 == 0:
                for _ in range(3):
                    y = np.random.randint(0, height)
                    color = (int(np.random.randint(0, 255)),
                             int(np.random.randint(0, 255)),
                             int(np.random.randint(0, 255)))
                    cv2.rectangle(np_img, (0, y), (width, y + np.random.randint(1, 5)), color, -1)

            img = Image.fromarray(np_img)
            draw = ImageDraw.Draw(img)

            y_pos = start_y
            for line in lines:
                for (dx, dy), color in zip(offsets, layer_colors):
                    # Text flickers: drawn only on even frames or multiples of 3.
                    if frame_num % 2 == 0 or frame_num % 3 == 0:
                        draw.text((50 + dx + np.random.randint(-2, 3),
                                   y_pos + dy + np.random.randint(-2, 3)),
                                  line, font=font, fill=color)
                y_pos += line_height

            frame = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
            if frame_num % 10 == 0:
                frame = cv2.GaussianBlur(frame, (3, 3), 0)
            out.write(frame)

    def _create_wave_animation(self, out, prompt: str, total_frames: int,
                               width: int, height: int, fps: int):
        """Per-character sinusoidal motion with rainbow colouring over a rippling background."""
        font_size = max(25, min(55, (width - 2 * 50) // 12))
        font = self._get_font(font_size)

        lines = textwrap.wrap(prompt, width=25)
        line_height = font_size + 15

        for frame_num in range(total_frames):
            # Rippling blue-green background.
            frame_arr = np.empty((height, width, 3), dtype=np.uint8)
            for y in range(height):
                wave = np.sin((frame_num + y * 5) * 0.1) * 20
                color_val = int(40 + wave)
                frame_arr[y, :] = (color_val // 2, color_val, color_val + 20)

            img = Image.fromarray(frame_arr)
            draw = ImageDraw.Draw(img)

            y_base = (height - len(lines) * line_height) // 2
            for line_idx, line in enumerate(lines):
                for char_idx, char in enumerate(line):
                    # Each character bobs on its own phase of the wave.
                    wave_offset = int(np.sin(frame_num * 0.1 + char_idx * 0.3 + line_idx) * 10)
                    x_pos = 50 + char_idx * (font_size * 0.6)
                    y_offset = y_base + line_idx * line_height + wave_offset

                    hue = (frame_num * 2 + char_idx * 20 + line_idx * 50) % 360
                    rgb = colorsys.hsv_to_rgb(hue / 360, 0.8, 1.0)
                    draw.text((x_pos, y_offset), char, font=font,
                              fill=tuple(int(c * 255) for c in rgb))

            out.write(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))

    def _get_font(self, size: int):
        """Return a TrueType font at *size*, falling back to PIL's built-in bitmap font."""
        candidates = (
            "arial.ttf",                                            # Windows / cwd
            "/System/Library/Fonts/Arial.ttf",                      # macOS
            "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",      # Debian/Ubuntu
        )
        for path in candidates:
            try:
                return ImageFont.truetype(path, size)
            except OSError:  # font file missing/unreadable — try next candidate
                continue
        return ImageFont.load_default()

    def generate_pattern_video(self, pattern_type: str, duration: int = 5, fps: int = 24,
                               width: int = 512, height: int = 512) -> str:
        """Render an abstract pattern video and return the MP4 path.

        ``pattern_type`` is one of ``spiral``, ``particles``, ``waves``,
        ``fractal``; unknown types fall back to ``spiral``.
        """
        output_path = os.path.join(self.temp_dir, f"pattern_{int(time.time())}.mp4")
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
        total_frames = duration * fps

        renderers = {
            "spiral": self._create_spiral_pattern,
            "particles": self._create_particle_pattern,
            "waves": self._create_wave_pattern,
            "fractal": self._create_fractal_pattern,
        }
        try:
            renderer = renderers.get(pattern_type, self._create_spiral_pattern)
            renderer(out, total_frames, width, height)
        finally:
            out.release()
        return output_path

    def _create_spiral_pattern(self, out, total_frames: int, width: int, height: int):
        """Rotating rainbow spiral of dots around the frame centre."""
        center_x, center_y = width // 2, height // 2
        max_radius = min(width, height) // 2
        points = 500

        for frame_num in range(total_frames):
            frame = np.zeros((height, width, 3), dtype=np.uint8)

            for i in range(points):
                t = i * 0.1
                radius = (i / points) * max_radius
                angle = t + frame_num * 0.05  # whole spiral rotates over time

                x = int(center_x + radius * np.cos(angle))
                y = int(center_y + radius * np.sin(angle))

                if 0 <= x < width and 0 <= y < height:
                    hue = (i * 2 + frame_num * 5) % 360
                    rgb = colorsys.hsv_to_rgb(hue / 360, 1.0, 1.0)
                    color = tuple(int(c * 255) for c in rgb)
                    cv2.circle(frame, (x, y), 3, color, -1)

            out.write(frame)

    def _create_particle_pattern(self, out, total_frames: int, width: int, height: int):
        """Bouncing particle system with proximity lines between nearby particles."""
        num_particles = 100
        particles = [
            {
                'x': np.random.randint(0, width),
                'y': np.random.randint(0, height),
                'vx': np.random.uniform(-2, 2),
                'vy': np.random.uniform(-2, 2),
                'size': np.random.randint(2, 8),
                'hue': np.random.randint(0, 360),
            }
            for _ in range(num_particles)
        ]

        for frame_num in range(total_frames):
            frame = np.zeros((height, width, 3), dtype=np.uint8)

            for particle in particles:
                # Integrate position and reflect velocity at the walls.
                particle['x'] += particle['vx']
                particle['y'] += particle['vy']
                if particle['x'] <= 0 or particle['x'] >= width:
                    particle['vx'] *= -1
                if particle['y'] <= 0 or particle['y'] >= height:
                    particle['vy'] *= -1
                particle['x'] = np.clip(particle['x'], 0, width - 1)
                particle['y'] = np.clip(particle['y'], 0, height - 1)

                # Colour cycles with time from the particle's base hue.
                hue = (particle['hue'] + frame_num * 2) % 360
                rgb = colorsys.hsv_to_rgb(hue / 360, 1.0, 1.0)
                color = tuple(int(c * 255) for c in rgb)
                cv2.circle(frame, (int(particle['x']), int(particle['y'])),
                           particle['size'], color, -1)

                # Dim connection lines to neighbours within 100px.
                for other in particles:
                    dist = np.sqrt((particle['x'] - other['x']) ** 2 +
                                   (particle['y'] - other['y']) ** 2)
                    if 0 < dist < 100:
                        cv2.line(frame, (int(particle['x']), int(particle['y'])),
                                 (int(other['x']), int(other['y'])),
                                 (color[0] // 3, color[1] // 3, color[2] // 3), 1)

            out.write(frame)

    def _create_wave_pattern(self, out, total_frames: int, width: int, height: int):
        """Interference pattern of three sinusoids rendered as 5x5 colour tiles."""
        for frame_num in range(total_frames):
            frame = np.zeros((height, width, 3), dtype=np.uint8)

            for y in range(0, height, 5):
                for x in range(0, width, 5):
                    wave1 = np.sin(x * 0.02 + frame_num * 0.05) * 20
                    wave2 = np.cos(y * 0.02 + frame_num * 0.03) * 20
                    wave3 = np.sin(x * 0.01 + y * 0.01 + frame_num * 0.02) * 10

                    intensity = int(np.clip(int(128 + wave1 + wave2 + wave3), 0, 255))

                    # Hue follows intensity; brightness follows intensity too.
                    hue = (intensity + frame_num * 2) % 360
                    rgb = colorsys.hsv_to_rgb(hue / 360, 0.7, intensity / 255)
                    color = tuple(int(c * 255) for c in rgb)
                    cv2.rectangle(frame, (x, y), (x + 5, y + 5), color, -1)

            out.write(frame)

    def _create_fractal_pattern(self, out, total_frames: int, width: int, height: int):
        """Animated recursive fractal tree swaying with time."""
        max_depth = 5
        for frame_num in range(total_frames):
            frame = np.zeros((height, width, 3), dtype=np.uint8)
            # Trunk grows upward (-90 degrees) from near the bottom centre.
            self._draw_fractal_tree(frame, width // 2, height - 50, -90,
                                    frame_num * 0.02, max_depth, 80)
            out.write(frame)

    def _draw_fractal_tree(self, frame, x, y, angle, time_offset, depth, length):
        """Recursively draw one branch and its two children onto *frame*.

        ``depth`` counts down to 0; branch length shrinks by 0.7 per level and
        the fork angle oscillates with ``time_offset`` to animate swaying.
        """
        if depth == 0:
            return

        end_x = x + int(length * np.cos(np.radians(angle)))
        end_y = y + int(length * np.sin(np.radians(angle)))

        # Colour varies with depth and time.
        hue = (depth * 60 + time_offset * 50) % 360
        rgb = colorsys.hsv_to_rgb(hue / 360, 0.8, 0.8)
        color = tuple(int(c * 255) for c in rgb)

        # Thicker lines for branches nearer the trunk.
        cv2.line(frame, (x, y), (end_x, end_y), color, max(1, depth // 2))

        angle_variation = 20 + 10 * np.sin(time_offset)
        self._draw_fractal_tree(frame, end_x, end_y, angle - angle_variation,
                                time_offset, depth - 1, length * 0.7)
        self._draw_fractal_tree(frame, end_x, end_y, angle + angle_variation,
                                time_offset, depth - 1, length * 0.7)
460
 
461
# Module-level generator instance shared by all Gradio callbacks;
# it owns the temp directory that rendered videos are written into.
generator = CPUVideoGenerator()
463
 
464
def generate_video(prompt: str, style: str, duration: int, fps: int,
                   width: int, height: int, video_type: str) -> Tuple[str, str]:
    """Dispatch to text-animation or pattern rendering.

    Returns ``(video_path, status_message)``; on failure the path is ``None``
    and the status carries the error text, so the UI never sees an exception.
    """
    try:
        if video_type == "Text Animation":
            path = generator.generate_text_video(
                prompt, duration, fps, style, width, height
            )
        else:  # Pattern
            path = generator.generate_pattern_video(
                style, duration, fps, width, height
            )
    except Exception as e:
        return None, f"❌ Error: {str(e)}"
    return path, "✅ Video generated successfully!"
480
 
481
# Create Gradio interface: two-column layout — inputs on the left,
# rendered video + status on the right.
with gr.Blocks(title="CPU Video Generator", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🎬 CPU Video Generator
    Built with [anycoder](https://huggingface.co/spaces/akhaliq/anycoder)

    Generate animated videos entirely on CPU! Choose from text animations or abstract patterns.
    """)

    with gr.Row():
        with gr.Column():
            gr.Markdown("### 📝 Input Settings")

            # Top-level mode switch; drives which style choices are offered
            # and whether the prompt box is visible (see update_visibility).
            video_type = gr.Radio(
                choices=["Text Animation", "Pattern"],
                value="Text Animation",
                label="Video Type",
                info="Choose between text animations or abstract patterns"
            )

            # Only used by the text-animation styles; hidden in Pattern mode.
            prompt = gr.Textbox(
                label="Text Prompt (for Text Animation)",
                placeholder="Enter your text here...",
                value="Welcome to CPU Video Generation!",
                lines=3
            )

            # NOTE(review): initial choices mix text styles and pattern styles;
            # update_visibility() narrows the list once the user picks a mode.
            style = gr.Dropdown(
                choices=["typewriter", "fade", "slide", "glitch", "wave",
                         "spiral", "particles", "waves", "fractal"],
                value="typewriter",
                label="Animation Style",
                info="Choose the animation style"
            )

            with gr.Row():
                duration = gr.Slider(
                    minimum=2, maximum=15, value=5, step=1,
                    label="Duration (seconds)"
                )
                fps = gr.Slider(
                    minimum=12, maximum=30, value=24, step=1,
                    label="FPS"
                )

            with gr.Row():
                width = gr.Slider(
                    minimum=256, maximum=1024, value=512, step=64,
                    label="Width"
                )
                height = gr.Slider(
                    minimum=256, maximum=1024, value=512, step=64,
                    label="Height"
                )

            generate_btn = gr.Button("🎥 Generate Video", variant="primary", size="lg")

        with gr.Column():
            gr.Markdown("### 📹 Output")

            video_output = gr.Video(
                label="Generated Video",
                autoplay=False,
                show_download_button=True
            )

            status_output = gr.Textbox(
                label="Status",
                interactive=False
            )

    # Examples: each row fills all seven generate_video inputs; pattern rows
    # use an empty prompt since patterns ignore the text.
    gr.Markdown("### 💡 Examples")
    examples = gr.Examples(
        examples=[
            ["Hello World! This is CPU video generation.", "typewriter", 5, 24, 512, 512, "Text Animation"],
            ["Amazing effects running on CPU only!", "wave", 7, 24, 640, 480, "Text Animation"],
            ["", "spiral", 5, 24, 512, 512, "Pattern"],
            ["", "particles", 8, 24, 640, 480, "Pattern"],
            ["Glitch art with CPU power!", "glitch", 6, 24, 512, 512, "Text Animation"],
            ["", "fractal", 10, 24, 512, 512, "Pattern"],
        ],
        inputs=[prompt, style, duration, fps, width, height, video_type],
        outputs=[video_output, status_output],
        fn=generate_video,
        cache_examples=False
    )

    # Handle video type change: toggle prompt visibility and swap the style
    # dropdown between text-animation styles and pattern styles.
    def update_visibility(video_type):
        if video_type == "Text Animation":
            return gr.update(visible=True), gr.update(choices=["typewriter", "fade", "slide", "glitch", "wave"], value="typewriter")
        else:
            return gr.update(visible=False), gr.update(choices=["spiral", "particles", "waves", "fractal"], value="spiral")

    video_type.change(
        update_visibility,
        inputs=[video_type],
        outputs=[prompt, style]
    )

    # Generate button click
    generate_btn.click(
        generate_video,
        inputs=[prompt, style, duration, fps, width, height, video_type],
        outputs=[video_output, status_output]
    )
588
 
589
if __name__ == "__main__":
    # NOTE(review): share=True requests a public tunnel link — presumably for
    # local testing; verify it is wanted in the deployment environment.
    demo.launch(share=True)