dagloop5 committed
Commit 9fa2ff5 · verified · 1 Parent(s): 1c84032

Delete app.py

Files changed (1)
  1. app.py +0 -583
app.py DELETED
@@ -1,583 +0,0 @@
import os
import subprocess
import sys

# Disable torch.compile / dynamo before any torch import
os.environ["TORCH_COMPILE_DISABLE"] = "1"
os.environ["TORCHDYNAMO_DISABLE"] = "1"

# Install xformers for memory-efficient attention
subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)
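# check=False: an install failure is tolerated here; the try/except around the
# attention patch below then falls back to the module's stock attention.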

# Clone LTX-2 repo and install packages
LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")

if not os.path.exists(LTX_REPO_DIR):
    print(f"Cloning {LTX_REPO_URL}...")
    subprocess.run(["git", "clone", "--depth", "1", LTX_REPO_URL, LTX_REPO_DIR], check=True)

print("Installing ltx-core and ltx-pipelines from cloned repo...")
subprocess.run(
    [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
     os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
     "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
    check=True,
)

sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
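# Prepending the src/ trees is a belt-and-braces measure: with --no-deps
# editable installs, importing straight from the checkout avoids relying on
# the generated import hooks.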

import logging
import random
import tempfile
from pathlib import Path

import torch
torch._dynamo.config.suppress_errors = True
torch._dynamo.config.disable = True

import spaces
import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_download, snapshot_download
from ltx_core.loader import LoraPathStrengthAndSDOps, LTXV_LORA_COMFY_RENAMING_MAP

from ltx_core.components.diffusion_steps import EulerDiffusionStep
from ltx_core.components.noisers import GaussianNoiser
from ltx_core.model.audio_vae import encode_audio as vae_encode_audio
from ltx_core.model.upsampler import upsample_video
from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
from ltx_core.quantization import QuantizationPolicy
from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
from ltx_pipelines.distilled import DistilledPipeline
from ltx_pipelines.utils import euler_denoising_loop
from ltx_pipelines.utils.args import ImageConditioningInput
from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
from ltx_pipelines.utils.helpers import (
    cleanup_memory,
    combined_image_conditionings,
    denoise_video_only,
    encode_prompts,
    simple_denoising_func,
)
from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video

# Force-patch xformers attention into the LTX attention module.
from ltx_core.model.transformer import attention as _attn_mod
print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
try:
    from xformers.ops import memory_efficient_attention as _mea
    _attn_mod.memory_efficient_attention = _mea
    print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
except Exception as e:
    print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")

logging.getLogger().setLevel(logging.INFO)

LORA_RUNTIME_SCALE = 1.0

MAX_SEED = np.iinfo(np.int32).max
DEFAULT_PROMPT = (
    "An astronaut hatches from a fragile egg on the surface of the Moon, "
    "the shell cracking and peeling apart in gentle low-gravity motion. "
    "Fine lunar dust lifts and drifts outward with each movement, floating "
    "in slow arcs before settling back onto the ground."
)
DEFAULT_FRAME_RATE = 24.0

# Resolution presets: (width, height)
RESOLUTIONS = {
    "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
    "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
}
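# All presets keep both dimensions multiples of 256, which comfortably covers
# the VAE's spatial downsampling factor (assumed here; the exact divisibility
# requirement lives in ltx_core).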


class LTX23DistilledA2VPipeline(DistilledPipeline):
    """DistilledPipeline with optional audio conditioning."""

    def __call__(
        self,
        prompt: str,
        seed: int,
        height: int,
        width: int,
        num_frames: int,
        frame_rate: float,
        images: list[ImageConditioningInput],
        audio_path: str | None = None,
        tiling_config: TilingConfig | None = None,
        enhance_prompt: bool = False,
    ):
        # Standard path when no audio input is provided.
        print(prompt)
        if audio_path is None:
            return super().__call__(
                prompt=prompt,
                seed=seed,
                height=height,
                width=width,
                num_frames=num_frames,
                frame_rate=frame_rate,
                images=images,
                tiling_config=tiling_config,
                enhance_prompt=enhance_prompt,
            )
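
        # Audio-conditioned path: encode the reference audio into latents and
        # keep them fixed while only the video stream is denoised against them.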
        generator = torch.Generator(device=self.device).manual_seed(seed)
        noiser = GaussianNoiser(generator=generator)
        stepper = EulerDiffusionStep()
        dtype = torch.bfloat16

        (ctx_p,) = encode_prompts(
            [prompt],
            self.model_ledger,
            enhance_first_prompt=enhance_prompt,
            enhance_prompt_image=images[0].path if len(images) > 0 else None,
        )
        video_context, audio_context = ctx_p.video_encoding, ctx_p.audio_encoding

        video_duration = num_frames / frame_rate
        decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
        if decoded_audio is None:
            raise ValueError(f"Could not extract audio stream from {audio_path}")

        encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
        audio_shape = AudioLatentShape.from_duration(batch=1, duration=video_duration, channels=8, mel_bins=16)
        expected_frames = audio_shape.frames
        actual_frames = encoded_audio_latent.shape[2]

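        # Align the encoded audio latent with the length implied by the video
        # duration: trim if the encoder produced too many latent frames,
        # zero-pad if it produced too few.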
        if actual_frames > expected_frames:
            encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
        elif actual_frames < expected_frames:
            pad = torch.zeros(
                encoded_audio_latent.shape[0],
                encoded_audio_latent.shape[1],
                expected_frames - actual_frames,
                encoded_audio_latent.shape[3],
                device=encoded_audio_latent.device,
                dtype=encoded_audio_latent.dtype,
            )
            encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)

        video_encoder = self.model_ledger.video_encoder()
        transformer = self.model_ledger.transformer()
        stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)

        def denoising_loop(sigmas, video_state, audio_state, stepper):
            return euler_denoising_loop(
                sigmas=sigmas,
                video_state=video_state,
                audio_state=audio_state,
                stepper=stepper,
                denoise_fn=simple_denoising_func(
                    video_context=video_context,
                    audio_context=audio_context,
                    transformer=transformer,
                ),
            )

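        # Stage 1: denoise the video latent at half spatial resolution, with
        # the encoded audio latent held fixed as conditioning.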
        stage_1_output_shape = VideoPixelShape(
            batch=1,
            frames=num_frames,
            width=width // 2,
            height=height // 2,
            fps=frame_rate,
        )
        stage_1_conditionings = combined_image_conditionings(
            images=images,
            height=stage_1_output_shape.height,
            width=stage_1_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_1_output_shape,
            conditionings=stage_1_conditionings,
            noiser=noiser,
            sigmas=stage_1_sigmas,
            stepper=stepper,
            denoising_loop_fn=denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        cleanup_memory()

        upscaled_video_latent = upsample_video(
            latent=video_state.latent[:1],
            video_encoder=video_encoder,
            upsampler=self.model_ledger.spatial_upsampler(),
        )
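        # Stage 2: refine at full resolution, starting from the spatially
        # upsampled stage-1 latent rather than from pure noise.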
        stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=self.device)
        stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
        stage_2_conditionings = combined_image_conditionings(
            images=images,
            height=stage_2_output_shape.height,
            width=stage_2_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_2_output_shape,
            conditionings=stage_2_conditionings,
            noiser=noiser,
            sigmas=stage_2_sigmas,
            stepper=stepper,
            denoising_loop_fn=denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            noise_scale=stage_2_sigmas[0],
            initial_video_latent=upscaled_video_latent,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        del transformer
        del video_encoder
        cleanup_memory()

        decoded_video = vae_decode_video(
            video_state.latent,
            self.model_ledger.video_decoder(),
            tiling_config,
            generator,
        )
        original_audio = Audio(
            waveform=decoded_audio.waveform.squeeze(0),
            sampling_rate=decoded_audio.sampling_rate,
        )
        return decoded_video, original_audio


# Model repos
LTX_MODEL_REPO = "Lightricks/LTX-2.3"
GEMMA_REPO = "rahul7star/gemma-3-12b-it-heretic"


# Download model checkpoints
print("=" * 80)
print("Downloading LTX-2.3 distilled model + Gemma...")
print("=" * 80)

checkpoint_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-22b-distilled.safetensors")
spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
gemma_root = snapshot_download(repo_id=GEMMA_REPO)

lora_path = hf_hub_download(
    repo_id="dagloop5/LoRA",
    filename="LoRA2.safetensors"
)

print(f"Checkpoint: {checkpoint_path}")
print(f"Spatial upsampler: {spatial_upsampler_path}")
print(f"Gemma root: {gemma_root}")

# Initialize pipeline WITH text encoder and optional audio support
pipeline = LTX23DistilledA2VPipeline(
    distilled_checkpoint_path=checkpoint_path,
    spatial_upsampler_path=spatial_upsampler_path,
    gemma_root=gemma_root,
    loras=[
        LoraPathStrengthAndSDOps(
            lora_path,
            1.0,  # fixed internal strength
            LTXV_LORA_COMFY_RENAMING_MAP,
        )
    ],
    quantization=None,
)

# Preload all models for ZeroGPU tensor packing.
print("Preloading all models (including Gemma and audio components)...")
ledger = pipeline.model_ledger
_transformer = ledger.transformer()
_original_forward = _transformer.forward

def _lora_scaled_forward(*args, **kwargs):
    out = _original_forward(*args, **kwargs)

    # Approximation: scale the whole transformer output by the runtime LoRA
    # strength (the LoRA delta itself is not isolated here).
    scale = LORA_RUNTIME_SCALE

    if scale == 1.0:
        return out
    if scale == 0.0:
        # crude fallback: suppress output magnitude slightly
        if torch.is_tensor(out):
            return out * 0.5
        return out

    if torch.is_tensor(out):
        return out * scale

    if isinstance(out, tuple):
        return tuple(
            o * scale if torch.is_tensor(o) else o
            for o in out
        )

    return out

_transformer.forward = _lora_scaled_forward
_video_encoder = ledger.video_encoder()
_video_decoder = ledger.video_decoder()
_audio_encoder = ledger.audio_encoder()
_audio_decoder = ledger.audio_decoder()
_vocoder = ledger.vocoder()
_spatial_upsampler = ledger.spatial_upsampler()
_text_encoder = ledger.text_encoder()
_embeddings_processor = ledger.gemma_embeddings_processor()
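
# Rebind the ledger accessors so later calls return the instances loaded above;
# nothing is reloaded from disk once generation starts.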
ledger.transformer = lambda: _transformer
ledger.video_encoder = lambda: _video_encoder
ledger.video_decoder = lambda: _video_decoder
ledger.audio_encoder = lambda: _audio_encoder
ledger.audio_decoder = lambda: _audio_decoder
ledger.vocoder = lambda: _vocoder
ledger.spatial_upsampler = lambda: _spatial_upsampler
ledger.text_encoder = lambda: _text_encoder
ledger.gemma_embeddings_processor = lambda: _embeddings_processor
print("All models preloaded (including Gemma text encoder and audio encoder)!")

print("=" * 80)
print("Pipeline ready!")
print("=" * 80)


def log_memory(tag: str):
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        peak = torch.cuda.max_memory_allocated() / 1024**3
        free, total = torch.cuda.mem_get_info()
        print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")


def detect_aspect_ratio(image) -> str:
    if image is None:
        return "16:9"
    if hasattr(image, "size"):
        w, h = image.size
    elif hasattr(image, "shape"):
        h, w = image.shape[:2]
    else:
        return "16:9"
    ratio = w / h
    candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
    return min(candidates, key=lambda k: abs(ratio - candidates[k]))


def on_image_upload(first_image, last_image, high_res):
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


# The high-res toggle applies exactly the same resolution rule.
on_highres_toggle = on_image_upload

@spaces.GPU(duration=75)
@torch.inference_mode()
def generate_video(
    first_image,
    last_image,
    input_audio,
    prompt: str,
    duration: float,
    lora_strength: float,
    enhance_prompt: bool = True,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    progress=gr.Progress(track_tqdm=True),
):
    # Resolve the seed before entering the try block so the except handler
    # below can always return it.
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    try:
        torch.cuda.reset_peak_memory_stats()
        log_memory("start")

        global LORA_RUNTIME_SCALE
        LORA_RUNTIME_SCALE = lora_strength

        frame_rate = DEFAULT_FRAME_RATE
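        # Round the frame count up to the 8k + 1 form the LTX pipelines expect.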
        num_frames = int(duration * frame_rate) + 1
        num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1

        print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")

        images = []
        output_dir = Path("outputs")
        output_dir.mkdir(exist_ok=True)

        if first_image is not None:
            temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
            if hasattr(first_image, "save"):
                first_image.save(temp_first_path)
            else:
                temp_first_path = Path(first_image)
            images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))

        if last_image is not None:
            temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
            if hasattr(last_image, "save"):
                last_image.save(temp_last_path)
            else:
                temp_last_path = Path(last_image)
            images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))

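        # Tiled VAE decoding bounds peak VRAM; the chunk count is forwarded to
        # encode_video, presumably so the muxer can consume decoded chunks
        # incrementally.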
        tiling_config = TilingConfig.default()
        video_chunks_number = get_video_chunks_number(num_frames, tiling_config)

        log_memory("before pipeline call")

        video, audio = pipeline(
            prompt=prompt,
            seed=current_seed,
            height=int(height),
            width=int(width),
            num_frames=num_frames,
            frame_rate=frame_rate,
            images=images,
            audio_path=input_audio,
            tiling_config=tiling_config,
            enhance_prompt=enhance_prompt,
        )

        log_memory("after pipeline call")

        # tempfile.mktemp is deprecated and race-prone; write into a fresh
        # private directory instead.
        output_path = os.path.join(tempfile.mkdtemp(), f"output_{current_seed}.mp4")
        encode_video(
            video=video,
            fps=frame_rate,
            audio=audio,
            output_path=output_path,
            video_chunks_number=video_chunks_number,
        )

        log_memory("after encode_video")
        return str(output_path), current_seed

    except Exception as e:
        import traceback
        log_memory("on error")
        print(f"Error: {str(e)}\n{traceback.format_exc()}")
        return None, current_seed


css = """
.fillable{max-width: 1200px !important}
"""

with gr.Blocks(title="LTX-2.3 Heretic Distilled", theme=gr.themes.Citrus(), css=css) as demo:
    gr.Markdown("# LTX-2.3 F2LF: Heretic with Fast Audio-Video Generation and Frame Conditioning")

    with gr.Row():
        with gr.Column():
            with gr.Row():
                first_image = gr.Image(label="First Frame (Optional)", type="pil")
                last_image = gr.Image(label="Last Frame (Optional)", type="pil")
            input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")
            prompt = gr.Textbox(
                label="Prompt",
                info="For best results, make the prompt as elaborate as possible",
                value="Make this image come alive with cinematic motion, smooth animation",
                lines=3,
                placeholder="Describe the motion and animation you want...",
            )
            duration = gr.Slider(label="Duration (seconds)", minimum=1.0, maximum=10.0, value=3.0, step=0.1)
            lora_strength = gr.Slider(
                label="LoRA Strength",
                minimum=0.0,
                maximum=1.5,
                value=1.0,
                step=0.05,
            )

            generate_btn = gr.Button("Generate Video", variant="primary", size="lg")
509
-
510
- with gr.Accordion("Advanced Settings", open=False):
511
- seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
512
- randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
513
- with gr.Row():
514
- width = gr.Number(label="Width", value=1536, precision=0)
515
- height = gr.Number(label="Height", value=1024, precision=0)
516
- with gr.Row():
517
- enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
518
- high_res = gr.Checkbox(label="High Resolution", value=True)
519
-
520
- with gr.Column():
521
- output_video = gr.Video(label="Generated Video", autoplay=True)
522
-
523
    gr.Examples(
        examples=[
            [
                None,
                "pinkknit.jpg",
                None,
                "The camera falls downward through darkness as if dropped into a tunnel. "
                "As it slows, five friends wearing pink knitted hats and sunglasses lean "
                "over and look down toward the camera with curious expressions. The lens "
                "has a strong fisheye effect, creating a circular frame around them. They "
                "crowd together closely, forming a symmetrical cluster while staring "
                "directly into the lens.",
                3.0,
                1.0,
                False,
                42,
                True,
                1024,
                1024,
            ],
        ],
        inputs=[
            first_image, last_image, input_audio, prompt, duration, lora_strength,
            enhance_prompt, seed, randomize_seed, height, width,
        ],
    )

    first_image.change(
        fn=on_image_upload,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    last_image.change(
        fn=on_image_upload,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    high_res.change(
        fn=on_highres_toggle,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    generate_btn.click(
        fn=generate_video,
        inputs=[
            first_image, last_image, input_audio, prompt, duration, lora_strength, enhance_prompt,
            seed, randomize_seed, height, width,
        ],
        outputs=[output_video, seed],
    )


if __name__ == "__main__":
    demo.launch()