dagloop5 committed on
Commit
1c84032
·
verified ·
1 Parent(s): 39f71d0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -7
app.py CHANGED
@@ -291,7 +291,7 @@ pipeline = LTX23DistilledA2VPipeline(
291
  LTXV_LORA_COMFY_RENAMING_MAP
292
  )
293
  ],
294
- quantization=QuantizationPolicy.fp8_cast(),
295
  )
296
 
297
  # Preload all models for ZeroGPU tensor packing.
@@ -303,12 +303,26 @@ _original_forward = _transformer.forward
303
  def _lora_scaled_forward(*args, **kwargs):
304
  out = _original_forward(*args, **kwargs)
305
 
306
- # Apply runtime scaling to LoRA-influenced output
307
- # (LTX merges LoRA into attention residuals, so we scale output delta)
 
 
 
 
 
 
 
 
 
 
 
 
308
  if isinstance(out, tuple):
309
- return tuple(o * LORA_RUNTIME_SCALE if torch.is_tensor(o) else o for o in out)
310
- elif torch.is_tensor(out):
311
- return out * LORA_RUNTIME_SCALE
 
 
312
  return out
313
 
314
  _transformer.forward = _lora_scaled_forward
@@ -519,6 +533,7 @@ with gr.Blocks(title="LTX-2.3 Heretic Distilled") as demo:
519
  "crowd together closely, forming a symmetrical cluster while staring "
520
  "directly into the lens.",
521
  3.0,
 
522
  False,
523
  42,
524
  True,
@@ -527,7 +542,7 @@ with gr.Blocks(title="LTX-2.3 Heretic Distilled") as demo:
527
  ],
528
  ],
529
  inputs=[
530
- first_image, last_image, input_audio, prompt, duration,
531
  enhance_prompt, seed, randomize_seed, height, width,
532
  ],
533
  )
 
291
  LTXV_LORA_COMFY_RENAMING_MAP
292
  )
293
  ],
294
+ quantization=None,
295
  )
296
 
297
  # Preload all models for ZeroGPU tensor packing.
 
303
  def _lora_scaled_forward(*args, **kwargs):
304
  out = _original_forward(*args, **kwargs)
305
 
306
+ # Only scale deviation from baseline (approximation)
307
+ scale = LORA_RUNTIME_SCALE
308
+
309
+ if scale == 1.0:
310
+ return out
311
+ if scale == 0.0:
312
+ # crude fallback: suppress output magnitude slightly
313
+ if torch.is_tensor(out):
314
+ return out * 0.5
315
+ return out
316
+
317
+ if torch.is_tensor(out):
318
+ return out * scale
319
+
320
  if isinstance(out, tuple):
321
+ return tuple(
322
+ o * scale if torch.is_tensor(o) else o
323
+ for o in out
324
+ )
325
+
326
  return out
327
 
328
  _transformer.forward = _lora_scaled_forward
 
533
  "crowd together closely, forming a symmetrical cluster while staring "
534
  "directly into the lens.",
535
  3.0,
536
+ 1.0,
537
  False,
538
  42,
539
  True,
 
542
  ],
543
  ],
544
  inputs=[
545
+ first_image, last_image, input_audio, prompt, duration, lora_strength,
546
  enhance_prompt, seed, randomize_seed, height, width,
547
  ],
548
  )