dagloop5 committed
Commit c41e6f7 · verified · 1 parent: b67f132

Delete app.py

Files changed (1)
app.py +0 -667
app.py DELETED
@@ -1,667 +0,0 @@
import os
import subprocess
import sys

# Disable torch.compile / dynamo before any torch import
os.environ["TORCH_COMPILE_DISABLE"] = "1"
os.environ["TORCHDYNAMO_DISABLE"] = "1"

# Install xformers for memory-efficient attention
subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)

# Clone LTX-2 repo and install packages
LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")

if not os.path.exists(LTX_REPO_DIR):
    print(f"Cloning {LTX_REPO_URL}...")
    subprocess.run(["git", "clone", "--depth", "1", LTX_REPO_URL, LTX_REPO_DIR], check=True)

print("Installing ltx-core and ltx-pipelines from cloned repo...")
subprocess.run(
    [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
     os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
     "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
    check=True,
)

sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
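
# Note (added for exposition): --no-deps keeps pip from touching the Space's
# preinstalled torch build, and the sys.path inserts above are likely a
# belt-and-braces measure so the cloned packages import even if the editable
# install metadata is not picked up.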

import logging
import random
import tempfile
from pathlib import Path

import torch
torch._dynamo.config.suppress_errors = True
torch._dynamo.config.disable = True

import spaces
import gradio as gr
import numpy as np
from collections import OrderedDict
from huggingface_hub import hf_hub_download, snapshot_download

from ltx_core.components.diffusion_steps import EulerDiffusionStep
from ltx_core.components.noisers import GaussianNoiser
from ltx_core.model.audio_vae import encode_audio as vae_encode_audio
from ltx_core.model.upsampler import upsample_video
from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
# LoRA loading, quantization policy, and shared tensor types
from ltx_core.loader import LoraPathStrengthAndSDOps, LTXV_LORA_COMFY_RENAMING_MAP
from ltx_core.quantization import QuantizationPolicy
from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
from ltx_pipelines.distilled import DistilledPipeline
from ltx_pipelines.utils import euler_denoising_loop
from ltx_pipelines.utils.args import ImageConditioningInput
from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
from ltx_pipelines.utils.helpers import (
    cleanup_memory,
    combined_image_conditionings,
    denoise_video_only,
    encode_prompts,
    simple_denoising_func,
)
from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video

# Force-patch xformers attention into the LTX attention module.
from ltx_core.model.transformer import attention as _attn_mod
print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
try:
    from xformers.ops import memory_efficient_attention as _mea
    _attn_mod.memory_efficient_attention = _mea
    print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
except Exception as e:
    print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")
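
# Optional self-test (added for illustration, not in the original file): run one
# tiny call through whatever kernel is now installed so a broken xformers wheel
# fails loudly at startup instead of mid-generation. Skipped when no GPU is
# visible, as on ZeroGPU at import time. Shapes are (batch, seq, heads, head_dim).
if torch.cuda.is_available():
    try:
        _q = torch.randn(1, 16, 4, 64, device="cuda", dtype=torch.float16)
        _out = _attn_mod.memory_efficient_attention(_q, _q, _q)
        print(f"[ATTN] self-test OK, output shape={tuple(_out.shape)}")
    except Exception as e:
        print(f"[ATTN] self-test failed: {type(e).__name__}: {e}")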

logging.getLogger().setLevel(logging.INFO)

MAX_SEED = np.iinfo(np.int32).max
DEFAULT_PROMPT = (
    "An astronaut hatches from a fragile egg on the surface of the Moon, "
    "the shell cracking and peeling apart in gentle low-gravity motion. "
    "Fine lunar dust lifts and drifts outward with each movement, floating "
    "in slow arcs before settling back onto the ground."
)
DEFAULT_FRAME_RATE = 24.0

# Resolution presets: (width, height)
RESOLUTIONS = {
    "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
    "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
}


class LTX23DistilledA2VPipeline(DistilledPipeline):
    """DistilledPipeline with optional audio conditioning."""

    def __call__(
        self,
        prompt: str,
        seed: int,
        height: int,
        width: int,
        num_frames: int,
        frame_rate: float,
        images: list[ImageConditioningInput],
        audio_path: str | None = None,
        tiling_config: TilingConfig | None = None,
        enhance_prompt: bool = False,
    ):
        print(prompt)
        # Standard path when no audio input is provided.
        if audio_path is None:
            return super().__call__(
                prompt=prompt,
                seed=seed,
                height=height,
                width=width,
                num_frames=num_frames,
                frame_rate=frame_rate,
                images=images,
                tiling_config=tiling_config,
                enhance_prompt=enhance_prompt,
            )

        generator = torch.Generator(device=self.device).manual_seed(seed)
        noiser = GaussianNoiser(generator=generator)
        stepper = EulerDiffusionStep()
        dtype = torch.bfloat16

        (ctx_p,) = encode_prompts(
            [prompt],
            self.model_ledger,
            enhance_first_prompt=enhance_prompt,
            enhance_prompt_image=images[0].path if len(images) > 0 else None,
        )
        video_context, audio_context = ctx_p.video_encoding, ctx_p.audio_encoding

        video_duration = num_frames / frame_rate
        decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
        if decoded_audio is None:
            raise ValueError(f"Could not extract audio stream from {audio_path}")

        # Encode the reference audio, then trim or zero-pad the latent to the
        # frame count implied by the video duration.
        encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
        audio_shape = AudioLatentShape.from_duration(batch=1, duration=video_duration, channels=8, mel_bins=16)
        expected_frames = audio_shape.frames
        actual_frames = encoded_audio_latent.shape[2]

        if actual_frames > expected_frames:
            encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
        elif actual_frames < expected_frames:
            pad = torch.zeros(
                encoded_audio_latent.shape[0],
                encoded_audio_latent.shape[1],
                expected_frames - actual_frames,
                encoded_audio_latent.shape[3],
                device=encoded_audio_latent.device,
                dtype=encoded_audio_latent.dtype,
            )
            encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)

        video_encoder = self.model_ledger.video_encoder()
        transformer = self.model_ledger.transformer()
        stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)

        def denoising_loop(sigmas, video_state, audio_state, stepper):
            return euler_denoising_loop(
                sigmas=sigmas,
                video_state=video_state,
                audio_state=audio_state,
                stepper=stepper,
                denoise_fn=simple_denoising_func(
                    video_context=video_context,
                    audio_context=audio_context,
                    transformer=transformer,
                ),
            )

        # Stage 1: denoise at half resolution.
        stage_1_output_shape = VideoPixelShape(
            batch=1,
            frames=num_frames,
            width=width // 2,
            height=height // 2,
            fps=frame_rate,
        )
        stage_1_conditionings = combined_image_conditionings(
            images=images,
            height=stage_1_output_shape.height,
            width=stage_1_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_1_output_shape,
            conditionings=stage_1_conditionings,
            noiser=noiser,
            sigmas=stage_1_sigmas,
            stepper=stepper,
            denoising_loop_fn=denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        cleanup_memory()

        # Stage 2: upsample the stage 1 latent 2x spatially, then refine at full resolution.
        upscaled_video_latent = upsample_video(
            latent=video_state.latent[:1],
            video_encoder=video_encoder,
            upsampler=self.model_ledger.spatial_upsampler(),
        )
        stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=self.device)
        stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
        stage_2_conditionings = combined_image_conditionings(
            images=images,
            height=stage_2_output_shape.height,
            width=stage_2_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_2_output_shape,
            conditionings=stage_2_conditionings,
            noiser=noiser,
            sigmas=stage_2_sigmas,
            stepper=stepper,
            denoising_loop_fn=denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            noise_scale=stage_2_sigmas[0],
            initial_video_latent=upscaled_video_latent,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        del transformer
        del video_encoder
        cleanup_memory()

        decoded_video = vae_decode_video(
            video_state.latent,
            self.model_ledger.video_decoder(),
            tiling_config,
            generator,
        )
        original_audio = Audio(
            waveform=decoded_audio.waveform.squeeze(0),
            sampling_rate=decoded_audio.sampling_rate,
        )
        return decoded_video, original_audio
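

# Shape walk-through (illustrative, added for exposition): for a 1536x1024
# request, stage 1 denoises at 768x512, upsample_video doubles the latent back
# to full resolution, and stage 2 refines at 1536x1024 starting from
# noise_scale=stage_2_sigmas[0] on the upsampled latent.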

# Model repos
LTX_MODEL_REPO = "Lightricks/LTX-2.3"
GEMMA_REPO = "rahul7star/gemma-3-12b-it-heretic"


# Download model checkpoints
print("=" * 80)
print("Downloading LTX-2.3 distilled model + Gemma...")
print("=" * 80)

checkpoint_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-22b-distilled.safetensors")
spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
gemma_root = snapshot_download(repo_id=GEMMA_REPO)

# Download and prepare the LoRA descriptor for this Space.
print("Downloading LoRA for this Space (dagloop5/LoRA:LoRA2.safetensors)...")
lora_path = hf_hub_download(repo_id="dagloop5/LoRA", filename="LoRA2.safetensors")
# Create the descriptor object the LTX loader expects. The initial strength is 1.0;
# pipelines for other strengths are built on demand inside generate_video.
lora_descriptor = LoraPathStrengthAndSDOps(lora_path, 1.0, LTXV_LORA_COMFY_RENAMING_MAP)

print(f"LoRA: {lora_path}")
print(f"Checkpoint: {checkpoint_path}")
print(f"Spatial upsampler: {spatial_upsampler_path}")
print(f"Gemma root: {gemma_root}")

# Initialize the pipeline with the text encoder and optional audio support.
pipeline = LTX23DistilledA2VPipeline(
    distilled_checkpoint_path=checkpoint_path,
    spatial_upsampler_path=spatial_upsampler_path,
    gemma_root=gemma_root,
    loras=[lora_descriptor],
    quantization=QuantizationPolicy.fp8_cast(),
)

# Preload models for ZeroGPU tensor packing, pinning everything except the transformer.
print("Preloading models (pinning decoders/encoders but leaving transformer dynamic)...")
ledger = pipeline.model_ledger

# NOTE: do NOT call ledger.transformer() here. The transformer's construction stays
# dynamic so that a LoRA strength chosen at runtime is applied when the transformer
# is built. All other components are safe to pin.
_video_encoder = ledger.video_encoder()
_video_decoder = ledger.video_decoder()
_audio_encoder = ledger.audio_encoder()
_audio_decoder = ledger.audio_decoder()
_vocoder = ledger.vocoder()
_spatial_upsampler = ledger.spatial_upsampler()
_text_encoder = ledger.text_encoder()
_embeddings_processor = ledger.gemma_embeddings_processor()

# Replace ledger methods so these components return the pinned objects.
# Intentionally do NOT override ledger.transformer, so it is built on demand.
ledger.video_encoder = lambda: _video_encoder
ledger.video_decoder = lambda: _video_decoder
ledger.audio_encoder = lambda: _audio_encoder
ledger.audio_decoder = lambda: _audio_decoder
ledger.vocoder = lambda: _vocoder
ledger.spatial_upsampler = lambda: _spatial_upsampler
ledger.text_encoder = lambda: _text_encoder
ledger.gemma_embeddings_processor = lambda: _embeddings_processor

print("Selected models pinned. Transformer remains dynamic to reflect runtime LoRA strength.")
print("Preload complete.")
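
# Illustrative generalization (added for exposition; nothing below calls it).
# The hand-written lambda pinning above is per-method memoization, which a
# generic helper could express as:
def _pin_component(ledger_obj, name: str):
    """Build a ledger component once; make every later call return that instance."""
    instance = getattr(ledger_obj, name)()
    setattr(ledger_obj, name, lambda: instance)
    return instance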

print("=" * 80)
print("Pipeline ready!")
print("=" * 80)

# ----------------------------
# Pipeline cache keyed by LoRA strength (keeps at most 2 pipelines to limit VRAM).
# ----------------------------
pipeline_cache: OrderedDict[float, LTX23DistilledA2VPipeline] = OrderedDict()
current_lora_strength: float = 1.0
pipeline_cache[current_lora_strength] = pipeline
CACHE_MAX_SIZE = 2
print(f"[CACHE] initialized pipeline cache with strength={current_lora_strength}")
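
# LRU mechanics (added for exposition): OrderedDict preserves insertion order,
# so pop(key) followed by cache[key] = value marks an entry most-recently-used,
# and popitem(last=False) evicts the least-recently-used entry. generate_video
# below relies on exactly these two moves.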


def log_memory(tag: str):
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        peak = torch.cuda.max_memory_allocated() / 1024**3
        free, total = torch.cuda.mem_get_info()
        print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")


def detect_aspect_ratio(image) -> str:
    if image is None:
        return "16:9"
    if hasattr(image, "size"):
        w, h = image.size
    elif hasattr(image, "shape"):
        h, w = image.shape[:2]
    else:
        return "16:9"
    ratio = w / h
    candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
    return min(candidates, key=lambda k: abs(ratio - candidates[k]))
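
# Example of the selection above (illustrative): a 1920x1080 upload has ratio
# ~1.78, closest to 16/9, so detect_aspect_ratio returns "16:9" and the handlers
# below resolve it to (1536, 1024) at the "high" tier or (768, 512) at "low".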


def on_image_upload(first_image, last_image, high_res):
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


def on_highres_toggle(first_image, last_image, high_res):
    # Same logic as on_image_upload; kept as a separate handler for the checkbox event.
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


@spaces.GPU(duration=75)
@torch.inference_mode()
def generate_video(
    first_image,
    last_image,
    input_audio,
    prompt: str,
    duration: float,
    enhance_prompt: bool = True,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    lora_strength: float = 1.0,
    progress=gr.Progress(track_tqdm=True),
):
    global pipeline, pipeline_cache, current_lora_strength
    # Choose the seed before entering the try block so the error path can still return it.
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    try:
        torch.cuda.reset_peak_memory_stats()
        log_memory("start")

        frame_rate = DEFAULT_FRAME_RATE
        # The model needs frame counts of the form 8k + 1; round up to the nearest one.
        num_frames = int(duration * frame_rate) + 1
        num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1
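        # Worked example (illustrative): duration=10.0 at 24 fps gives 241 raw
        # frames, and ((241 - 1 + 7) // 8) * 8 + 1 = 241 is already of the
        # 8k + 1 form; duration=2.5 gives 61, which rounds up to 65.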

        print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")

        images = []
        output_dir = Path("outputs")
        output_dir.mkdir(exist_ok=True)

        if first_image is not None:
            temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
            if hasattr(first_image, "save"):
                first_image.save(temp_first_path)
            else:
                temp_first_path = Path(first_image)
            images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))

        if last_image is not None:
            temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
            if hasattr(last_image, "save"):
                last_image.save(temp_last_path)
            else:
                temp_last_path = Path(last_image)
            images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))

        tiling_config = TilingConfig.default()
        video_chunks_number = get_video_chunks_number(num_frames, tiling_config)

        # ----------------------------
        # Pipeline-per-strength (small LRU cache): safe, deterministic LoRA switching.
        # Globals used: pipeline, pipeline_cache, current_lora_strength, CACHE_MAX_SIZE.
        # ----------------------------
        requested_strength = round(float(lora_strength), 2)

        # Fast path: the requested strength is already loaded.
        if requested_strength == current_lora_strength:
            print(f"[LoRA] requested strength {requested_strength} == current {current_lora_strength} -> using current pipeline")
        else:
            print(f"[LoRA] requested strength {requested_strength} != current {current_lora_strength}")

            # If cached, swap to that pipeline (move to end to mark as recently used).
            if requested_strength in pipeline_cache:
                print(f"[LoRA] using cached pipeline for strength={requested_strength}")
                cached = pipeline_cache.pop(requested_strength)
                pipeline_cache[requested_strength] = cached
                pipeline = cached
                current_lora_strength = requested_strength
            else:
                # Build a new pipeline for the requested strength.
                print(f"[LoRA] building new pipeline for strength={requested_strength} (this will free and reallocate memory)")
                # Free the previous pipeline and its GPU memory BEFORE building the new one.
                try:
                    # Remove the previous pipeline from the cache (if present).
                    if current_lora_strength in pipeline_cache:
                        pipeline_cache.pop(current_lora_strength, None)

                    # Drop the current pipeline reference.
                    try:
                        del pipeline
                    except Exception:
                        pass

                    # Aggressively free memory.
                    cleanup_memory()
                    torch.cuda.empty_cache()
                    print("[LoRA] freed memory, starting pipeline build")
                except Exception as e:
                    print(f"[LoRA] error while freeing old pipeline: {e}")

                # Create a runtime LoRA descriptor and build a fresh pipeline.
                runtime_lora = LoraPathStrengthAndSDOps(lora_path, float(requested_strength), LTXV_LORA_COMFY_RENAMING_MAP)
                new_pipeline = LTX23DistilledA2VPipeline(
                    distilled_checkpoint_path=checkpoint_path,
                    spatial_upsampler_path=spatial_upsampler_path,
                    gemma_root=gemma_root,
                    loras=[runtime_lora],
                    quantization=QuantizationPolicy.fp8_cast(),
                )

                # Pin safe components (same preloads as at startup) so heavy parts remain stable.
                try:
                    ledger = new_pipeline.model_ledger
                    _video_encoder = ledger.video_encoder()
                    _video_decoder = ledger.video_decoder()
                    _audio_encoder = ledger.audio_encoder()
                    _audio_decoder = ledger.audio_decoder()
                    _vocoder = ledger.vocoder()
                    _spatial_upsampler = ledger.spatial_upsampler()
                    _text_encoder = ledger.text_encoder()
                    _embeddings_processor = ledger.gemma_embeddings_processor()

                    ledger.video_encoder = lambda: _video_encoder
                    ledger.video_decoder = lambda: _video_decoder
                    ledger.audio_encoder = lambda: _audio_encoder
                    ledger.audio_decoder = lambda: _audio_decoder
                    ledger.vocoder = lambda: _vocoder
                    ledger.spatial_upsampler = lambda: _spatial_upsampler
                    ledger.text_encoder = lambda: _text_encoder
                    ledger.gemma_embeddings_processor = lambda: _embeddings_processor
                    print("[LoRA] new pipeline preloaded and pinned safe components")
                except Exception as e:
                    print(f"[LoRA] warning: preloading pinned components failed: {e}")

                # Set as the current pipeline and cache it.
                pipeline = new_pipeline
                pipeline_cache[requested_strength] = pipeline
                current_lora_strength = requested_strength

                # Evict the oldest entry if the cache size is exceeded.
                try:
                    while len(pipeline_cache) > CACHE_MAX_SIZE:
                        evicted_strength, evicted_pipeline = pipeline_cache.popitem(last=False)
                        del evicted_pipeline
                        cleanup_memory()
                        torch.cuda.empty_cache()
                        print(f"[CACHE] evicted pipeline strength={evicted_strength}")
                except Exception as e:
                    print(f"[CACHE] eviction error: {e}")

        # End of pipeline-per-strength swap/build.
        log_memory("before pipeline call")

        video, audio = pipeline(
            prompt=prompt,
            seed=current_seed,
            height=int(height),
            width=int(width),
            num_frames=num_frames,
            frame_rate=frame_rate,
            images=images,
            audio_path=input_audio,
            tiling_config=tiling_config,
            enhance_prompt=enhance_prompt,
        )

        log_memory("after pipeline call")

        # NamedTemporaryFile avoids the race-prone, deprecated tempfile.mktemp.
        output_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
        encode_video(
            video=video,
            fps=frame_rate,
            audio=audio,
            output_path=output_path,
            video_chunks_number=video_chunks_number,
        )

        log_memory("after encode_video")
        return str(output_path), current_seed

    except Exception as e:
        import traceback
        log_memory("on error")
        print(f"Error: {e}\n{traceback.format_exc()}")
        return None, current_seed


css = """
.fillable{max-width: 1200px !important}
"""

# gr.Blocks accepts theme and css at construction time; launch() does not.
with gr.Blocks(title="LTX-2.3 Heretic Distilled", theme=gr.themes.Citrus(), css=css) as demo:
    gr.Markdown("# LTX-2.3 F2LF (Heretic): Fast Audio-Video Generation with Frame Conditioning")

    with gr.Row():
        with gr.Column():
            with gr.Row():
                first_image = gr.Image(label="First Frame (Optional)", type="pil")
                last_image = gr.Image(label="Last Frame (Optional)", type="pil")
            input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")
            prompt = gr.Textbox(
                label="Prompt",
                info="For best results, make the prompt as elaborate as possible.",
                value="Make this image come alive with cinematic motion, smooth animation",
                lines=3,
                placeholder="Describe the motion and animation you want...",
            )
            duration = gr.Slider(label="Duration (seconds)", minimum=1.0, maximum=30.0, value=10.0, step=0.1)

            generate_btn = gr.Button("Generate Video", variant="primary", size="lg")

            with gr.Accordion("Advanced Settings", open=False):
                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                with gr.Row():
                    width = gr.Number(label="Width", value=1536, precision=0)
                    height = gr.Number(label="Height", value=1024, precision=0)
                with gr.Row():
                    enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
                    high_res = gr.Checkbox(label="High Resolution", value=True)

                # The LoRA slider sits outside the checkbox row so it spans the full accordion width.
                lora_strength = gr.Slider(
                    label="LoRA Strength",
                    info="Scale for the LoRA weights (0.0 = off). Set near 1.0 for full effect.",
                    minimum=0.0,
                    maximum=2.0,
                    value=1.0,
                    step=0.01,
                )

        with gr.Column():
            output_video = gr.Video(label="Generated Video", autoplay=False)

    gr.Examples(
        examples=[
            [
                None,
                "pinkknit.jpg",
                None,
                "The camera falls downward through darkness as if dropped into a tunnel. "
                "As it slows, five friends wearing pink knitted hats and sunglasses lean "
                "over and look down toward the camera with curious expressions. The lens "
                "has a strong fisheye effect, creating a circular frame around them. They "
                "crowd together closely, forming a symmetrical cluster while staring "
                "directly into the lens.",
                3.0,
                False,
                42,
                True,
                1024,
                1024,
                1.0,
            ],
        ],
        inputs=[
            first_image, last_image, input_audio, prompt, duration,
            enhance_prompt, seed, randomize_seed, height, width, lora_strength,
        ],
    )

    first_image.change(
        fn=on_image_upload,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    last_image.change(
        fn=on_image_upload,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    high_res.change(
        fn=on_highres_toggle,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    generate_btn.click(
        fn=generate_video,
        inputs=[
            first_image, last_image, input_audio, prompt, duration, enhance_prompt,
            seed, randomize_seed, height, width, lora_strength,
        ],
        outputs=[output_video, seed],
    )


if __name__ == "__main__":
    demo.launch()