RioShiina committed (verified)
Commit fef91dc · 1 Parent(s): f6cb2e8

Upload folder using huggingface_hub
chain_injectors/lora_injector.py ADDED
@@ -0,0 +1,67 @@
+ from copy import deepcopy
+
+ def inject(assembler, chain_definition, chain_items):
+     if not chain_items:
+         return
+
+     start_node_name = chain_definition.get('start')
+     start_node_id = None
+     if start_node_name:
+         if start_node_name not in assembler.node_map:
+             print(f"Warning: Start node '{start_node_name}' for dynamic LoRA chain not found. Skipping chain.")
+             return
+         start_node_id = assembler.node_map[start_node_name]
+
+     output_map = chain_definition.get('output_map', {})
+     current_connections = {}
+     for key, type_name in output_map.items():
+         if ':' in str(key):
+             node_name, idx_str = key.split(':')
+             if node_name not in assembler.node_map:
+                 print(f"Warning: Node '{node_name}' in chain's output_map not found. Skipping.")
+                 continue
+             node_id = assembler.node_map[node_name]
+             start_output_idx = int(idx_str)
+             current_connections[type_name] = [node_id, start_output_idx]
+         elif start_node_id:
+             start_output_idx = int(key)
+             current_connections[type_name] = [start_node_id, start_output_idx]
+         else:
+             print(f"Warning: LoRA chain has no 'start' node defined, and an output_map key '{key}' is not in 'node:index' format. Skipping this connection.")
+
+
+     input_map = chain_definition.get('input_map', {})
+     chain_output_map = chain_definition.get('template_output_map', { "0": "model", "1": "clip" })
+
+     for item_data in chain_items:
+         template_name = chain_definition['template']
+         template = assembler._get_node_template(template_name)
+         node_data = deepcopy(template)
+
+         for param_name, value in item_data.items():
+             if param_name in node_data['inputs']:
+                 node_data['inputs'][param_name] = value
+
+         for type_name, input_name in input_map.items():
+             if type_name in current_connections:
+                 node_data['inputs'][input_name] = current_connections[type_name]
+
+         new_node_id = assembler._get_unique_id()
+         assembler.workflow[new_node_id] = node_data
+
+         for idx_str, type_name in chain_output_map.items():
+             current_connections[type_name] = [new_node_id, int(idx_str)]
+
+     end_input_map = chain_definition.get('end_input_map', {})
+     for type_name, targets in end_input_map.items():
+         if type_name in current_connections:
+             if not isinstance(targets, list):
+                 targets = [targets]
+
+             for target_str in targets:
+                 end_node_name, end_input_name = target_str.split(':')
+                 if end_node_name in assembler.node_map:
+                     end_node_id = assembler.node_map[end_node_name]
+                     assembler.workflow[end_node_id]['inputs'][end_input_name] = current_connections[type_name]
+                 else:
+                     print(f"Warning: End node '{end_node_name}' for dynamic chain not found. Skipping connection.")
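For reference, a minimal sketch of how this injector can be exercised in isolation. The stub assembler, node ids, template shape, and LoRA filename below are illustrative assumptions, not part of this commit; the chain_definition mirrors the dynamic_lora_chains entry added to z-image.yaml further down, and chain_items has the same shape as the active_loras_for_gpu list built in sd_image_pipeline.py.

    from chain_injectors import lora_injector

    class StubAssembler:
        # Illustrative stand-in for the real WorkflowAssembler: only the attributes
        # the injector touches (node_map, workflow, _get_node_template, _get_unique_id).
        def __init__(self):
            self.node_map = {"unet_loader": "1", "clip_loader": "2",
                             "model_sampler": "3", "pos_prompt": "4", "neg_prompt": "5"}
            self.workflow = {
                "3": {"class_type": "KSampler", "inputs": {"model": ["1", 0]}},
                "4": {"class_type": "CLIPTextEncode", "inputs": {"clip": ["2", 0]}},
                "5": {"class_type": "CLIPTextEncode", "inputs": {"clip": ["2", 0]}},
            }
            self._next_id = 100

        def _get_node_template(self, name):
            # The real assembler loads node templates from its recipe files; this is a
            # guessed LoraLoader-shaped template in ComfyUI API format.
            return {"class_type": name,
                    "inputs": {"model": None, "clip": None, "lora_name": "",
                               "strength_model": 1.0, "strength_clip": 1.0}}

        def _get_unique_id(self):
            self._next_id += 1
            return str(self._next_id)

    chain_definition = {  # mirrors the dynamic_lora_chains recipe entry in z-image.yaml
        "template": "LoraLoader",
        "output_map": {"unet_loader:0": "model", "clip_loader:0": "clip"},
        "input_map": {"model": "model", "clip": "clip"},
        "end_input_map": {"model": ["model_sampler:model"],
                          "clip": ["pos_prompt:clip", "neg_prompt:clip"]},
    }
    chain_items = [  # same shape as active_loras_for_gpu in sd_image_pipeline.py
        {"lora_name": "style.safetensors", "strength_model": 0.8, "strength_clip": 0.8},
    ]

    asm = StubAssembler()
    lora_injector.inject(asm, chain_definition, chain_items)
    print(asm.workflow["101"]["inputs"]["lora_name"])  # style.safetensors
    print(asm.workflow["3"]["inputs"]["model"])         # ['101', 0] -> sampler now fed by the LoRA node
    print(asm.workflow["4"]["inputs"]["clip"])          # ['101', 1]

With two or more entries in chain_items the loop chains the LoraLoader nodes: each new node takes outputs 0 and 1 of the previous one, and only the last node is wired into the end_input_map targets.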
core/model_manager.py CHANGED
@@ -1,14 +1,10 @@
  import gc
- from typing import Dict, List, Any, Set
+ from typing import List

- import torch
  import gradio as gr
- from comfy import model_management

- from core.settings import ALL_MODEL_MAP, CHECKPOINT_DIR, LORA_DIR, DIFFUSION_MODELS_DIR, VAE_DIR, TEXT_ENCODERS_DIR
- from comfy_integration.nodes import LoraLoader
- from nodes import NODE_CLASS_MAPPINGS
- from utils.app_utils import get_value_at_index, _ensure_model_downloaded
+ from core.settings import ALL_MODEL_MAP
+ from utils.app_utils import _ensure_model_downloaded


  class ModelManager:
@@ -22,86 +18,9 @@ class ModelManager:
      def __init__(self):
          if hasattr(self, 'initialized'):
              return
-         self.loaded_models: Dict[str, Any] = {}
-         self.last_active_loras: List[Dict[str, Any]] = []
          self.initialized = True
          print("✅ ModelManager initialized.")

-     def get_loaded_model_names(self) -> Set[str]:
-         return set(self.loaded_models.keys())
-
-     def _load_model_combo(self, display_name: str, active_loras: List[Dict[str, Any]], progress) -> Dict[str, Any]:
-         print(f"--- [ModelManager] Loading model combo: '{display_name}' ---")
-
-         if display_name not in ALL_MODEL_MAP:
-             raise ValueError(f"Model '{display_name}' not found in configuration.")
-
-         _, components, _, _ = ALL_MODEL_MAP[display_name]
-
-         unet_filename = components.get('unet')
-         clip_filename = components.get('clip')
-         vae_filename = components.get('vae')
-
-         if not all([unet_filename, clip_filename, vae_filename]):
-             raise ValueError(f"Model '{display_name}' is missing required components (unet, clip, or vae) in model_list.yaml.")
-
-         unet_loader = NODE_CLASS_MAPPINGS["UNETLoader"]()
-         clip_loader = NODE_CLASS_MAPPINGS["CLIPLoader"]()
-         vae_loader = NODE_CLASS_MAPPINGS["VAELoader"]()
-
-         print(" - Loading UNET...")
-         unet_tuple = unet_loader.load_unet(unet_name=unet_filename, weight_dtype="default")
-
-         print(" - Loading CLIP...")
-         clip_tuple = clip_loader.load_clip(clip_name=clip_filename, type="lumina2", device="default")
-
-         print(" - Loading VAE...")
-         vae_tuple = vae_loader.load_vae(vae_name=vae_filename)
-
-         unet_object = get_value_at_index(unet_tuple, 0)
-         clip_object = get_value_at_index(clip_tuple, 0)
-
-         if active_loras:
-             print(f"--- [ModelManager] Applying {len(active_loras)} LoRAs on CPU... ---")
-             lora_loader = LoraLoader()
-             patched_unet, patched_clip = unet_object, clip_object
-
-             for lora_info in active_loras:
-                 patched_unet, patched_clip = lora_loader.load_lora(
-                     model=patched_unet,
-                     clip=patched_clip,
-                     lora_name=lora_info["lora_name"],
-                     strength_model=lora_info["strength_model"],
-                     strength_clip=lora_info["strength_clip"]
-                 )
-
-             unet_object = patched_unet
-             clip_object = patched_clip
-             print(f"--- [ModelManager] ✅ All LoRAs merged into the model on CPU. ---")
-
-         loaded_combo = {
-             "unet": (unet_object,),
-             "clip": (clip_object,),
-             "vae": vae_tuple,
-         }
-
-         print(f"--- [ModelManager] ✅ Successfully loaded combo '{display_name}' to CPU/RAM ---")
-         return loaded_combo
-
-     def move_models_to_gpu(self, required_models: List[str]):
-         print(f"--- [ModelManager] Moving models to GPU: {required_models} ---")
-         models_to_load_gpu = []
-         for name in required_models:
-             if name in self.loaded_models:
-                 model_combo = self.loaded_models[name]
-                 models_to_load_gpu.append(get_value_at_index(model_combo.get("unet"), 0))
-
-         if models_to_load_gpu:
-             model_management.load_models_gpu(models_to_load_gpu)
-             print("--- [ModelManager] ✅ Models successfully moved to GPU. ---")
-         else:
-             print("--- [ModelManager] ⚠️ No component models found to move to GPU. ---")
-
      def ensure_models_downloaded(self, required_models: List[str], progress):
          print(f"--- [ModelManager] Ensuring models are downloaded: {required_models} ---")

@@ -109,8 +28,9 @@ class ModelManager:
          for display_name in required_models:
              if display_name in ALL_MODEL_MAP:
                  _, components, _, _ = ALL_MODEL_MAP[display_name]
-                 for component_file in components.values():
-                     files_to_download.add(component_file)
+                 for component_key, component_file in components.items():
+                     if component_key in ['unet', 'clip', 'vae', 'lora']:
+                         files_to_download.add(component_file)

          files_to_download = list(files_to_download)
          total_files = len(files_to_download)
@@ -125,44 +45,4 @@

          print(f"--- [ModelManager] ✅ All required models are present on disk. ---")

-     def load_managed_models(self, required_models: List[str], active_loras: List[Dict[str, Any]], progress) -> Dict[str, Any]:
-         required_set = set(required_models)
-         current_set = set(self.loaded_models.keys())
-
-         loras_changed = active_loras != self.last_active_loras
-
-         models_to_unload = current_set - required_set
-         if models_to_unload or loras_changed:
-             if models_to_unload:
-                 print(f"--- [ModelManager] Models to unload: {models_to_unload} ---")
-             if loras_changed and not models_to_unload:
-                 models_to_unload = current_set.intersection(required_set)
-                 if active_loras:
-                     print(f"--- [ModelManager] LoRA configuration changed. Reloading base model(s): {models_to_unload} ---")
-                 else:
-                     print(f"--- [ModelManager] LoRAs removed. Reloading base model(s) to clear patches: {models_to_unload} ---")
-
-             model_management.unload_all_models()
-             self.loaded_models.clear()
-             gc.collect()
-             torch.cuda.empty_cache()
-             print("--- [ModelManager] All models unloaded to free RAM. ---")
-
-         models_to_load = required_set if (models_to_unload or loras_changed) else required_set - current_set
-
-         if models_to_load:
-             print(f"--- [ModelManager] Models to load: {models_to_load} ---")
-             for i, display_name in enumerate(models_to_load):
-                 progress(i / len(models_to_load), desc=f"Loading model: {display_name}")
-                 try:
-                     loaded_model_data = self._load_model_combo(display_name, active_loras, progress)
-                     self.loaded_models[display_name] = loaded_model_data
-                 except Exception as e:
-                     raise gr.Error(f"Failed to load model combo or apply LoRA for '{display_name}'. Reason: {e}")
-         else:
-             print(f"--- [ModelManager] All required models are already loaded. ---")
-
-         self.last_active_loras = active_loras
-         return {name: self.loaded_models[name] for name in required_models}
-
  model_manager = ModelManager()
core/pipelines/sd_image_pipeline.py CHANGED
@@ -116,25 +116,9 @@ class SdImagePipeline(BasePipeline):
      def _gpu_logic(self, ui_inputs: Dict, loras_string: str, required_models_for_gpu: List[str], workflow: Dict[str, Any], assembler: WorkflowAssembler, progress=gr.Progress(track_tqdm=True)):
          model_display_name = ui_inputs['model_display_name']

-         progress(0.1, desc="Moving models to GPU...")
-         self.model_manager.move_models_to_gpu(required_models_for_gpu)
-
          progress(0.4, desc="Executing workflow...")

-         loaded_model_combo = self.model_manager.loaded_models[model_display_name]
-
          initial_objects = {}
-
-         unet_loader_id = assembler.node_map.get("unet_loader")
-         clip_loader_id = assembler.node_map.get("clip_loader")
-         vae_loader_id = assembler.node_map.get("vae_loader")
-
-         if unet_loader_id: initial_objects[unet_loader_id] = loaded_model_combo.get("unet")
-         if clip_loader_id: initial_objects[clip_loader_id] = loaded_model_combo.get("clip")
-         if vae_loader_id: initial_objects[vae_loader_id] = loaded_model_combo.get("vae")
-
-         if not all([unet_loader_id, clip_loader_id, vae_loader_id]):
-             raise RuntimeError("Workflow is missing one or more required loaders (unet_loader, clip_loader, vae_loader).")

          decoded_images_tensor = self._execute_workflow(workflow, initial_objects=initial_objects)

@@ -172,25 +156,24 @@ class SdImagePipeline(BasePipeline):

          lora_data = ui_inputs.get('lora_data', [])
          active_loras_for_gpu, active_loras_for_meta = [], []
-         sources, ids, scales, files = lora_data[0::4], lora_data[1::4], lora_data[2::4], lora_data[3::4]

-         for i, (source, lora_id, scale, _) in enumerate(zip(sources, ids, scales, files)):
-             if scale > 0 and lora_id and lora_id.strip():
-                 lora_filename = None
-                 if source == "File":
-                     lora_filename = sanitize_filename(lora_id)
-                 elif source == "Civitai":
-                     local_path, status = get_lora_path(source, lora_id, ui_inputs['civitai_api_key'], progress)
-                     if local_path: lora_filename = os.path.basename(local_path)
-                     else: raise gr.Error(f"Failed to prepare LoRA {lora_id}: {status}")
-
-                 if lora_filename:
-                     active_loras_for_gpu.append({"lora_name": lora_filename, "strength_model": scale, "strength_clip": scale})
-                     active_loras_for_meta.append(f"{source} {lora_id}:{scale}")
+         if lora_data:
+             sources, ids, scales, files = lora_data[0::4], lora_data[1::4], lora_data[2::4], lora_data[3::4]
+
+             for i, (source, lora_id, scale, _) in enumerate(zip(sources, ids, scales, files)):
+                 if scale > 0 and lora_id and lora_id.strip():
+                     lora_filename = None
+                     if source == "File":
+                         lora_filename = sanitize_filename(lora_id)
+                     elif source == "Civitai":
+                         local_path, status = get_lora_path(source, lora_id, ui_inputs['civitai_api_key'], progress)
+                         if local_path: lora_filename = os.path.basename(local_path)
+                         else: raise gr.Error(f"Failed to prepare LoRA {lora_id}: {status}")
+
+                     if lora_filename:
+                         active_loras_for_gpu.append({"lora_name": lora_filename, "strength_model": scale, "strength_clip": scale})
+                         active_loras_for_meta.append(f"{source} {lora_id}:{scale}")

-         progress(0.1, desc="Loading models into RAM...")
-         self.model_manager.load_managed_models(required_models, active_loras=active_loras_for_gpu, progress=progress)
-
          ui_inputs['denoise'] = 1.0
          if task_type == 'img2img': ui_inputs['denoise'] = ui_inputs.get('img2img_denoise', 0.7)
          elif task_type == 'hires_fix': ui_inputs['denoise'] = ui_inputs.get('hires_denoise', 0.55)
@@ -275,19 +258,20 @@ class SdImagePipeline(BasePipeline):

          controlnet_data = ui_inputs.get('controlnet_data', [])
          active_controlnets = []
-         (cn_images, _, _, cn_strengths, cn_filepaths) = [controlnet_data[i::5] for i in range(5)]
-         for i in range(len(cn_images)):
-             if cn_images[i] and cn_strengths[i] > 0 and cn_filepaths[i] and cn_filepaths[i] != "None":
-                 ensure_controlnet_model_downloaded(cn_filepaths[i], progress)
-
-                 if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
-                 cn_temp_path = os.path.join(INPUT_DIR, f"temp_cn_{i}_{random.randint(1000, 9999)}.png")
-                 cn_images[i].save(cn_temp_path, "PNG")
-                 temp_files_to_clean.append(cn_temp_path)
-                 active_controlnets.append({
-                     "image": os.path.basename(cn_temp_path), "strength": cn_strengths[i],
-                     "start_percent": 0.0, "end_percent": 1.0, "control_net_name": cn_filepaths[i]
-                 })
+         if controlnet_data:
+             (cn_images, _, _, cn_strengths, cn_filepaths) = [controlnet_data[i::5] for i in range(5)]
+             for i in range(len(cn_images)):
+                 if cn_images[i] and cn_strengths[i] > 0 and cn_filepaths[i] and cn_filepaths[i] != "None":
+                     ensure_controlnet_model_downloaded(cn_filepaths[i], progress)
+
+                     if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
+                     cn_temp_path = os.path.join(INPUT_DIR, f"temp_cn_{i}_{random.randint(1000, 9999)}.png")
+                     cn_images[i].save(cn_temp_path, "PNG")
+                     temp_files_to_clean.append(cn_temp_path)
+                     active_controlnets.append({
+                         "image": os.path.basename(cn_temp_path), "strength": cn_strengths[i],
+                         "start_percent": 0.0, "end_percent": 1.0, "control_net_name": cn_filepaths[i]
+                     })

          diffsynth_controlnet_data = ui_inputs.get('diffsynth_controlnet_data', [])
          active_diffsynth_controlnets = []
@@ -378,6 +362,7 @@ class SdImagePipeline(BasePipeline):
              "unet_name": components['unet'],
              "clip_name": components['clip'],
              "vae_name": ui_inputs.get('vae_name', components['vae']),
+             "lora_chain": active_loras_for_gpu,
              "controlnet_chain": active_controlnets,
              "diffsynth_controlnet_chain": active_diffsynth_controlnets,
              "conditioning_chain": active_conditioning,
@@ -404,6 +389,43 @@ class SdImagePipeline(BasePipeline):
                  assembler=assembler,
                  progress=progress
              )
+
+             import json
+             import glob
+             from PIL import PngImagePlugin
+
+             prompt_json = json.dumps(workflow)
+
+             out_dir = os.path.abspath(OUTPUT_DIR)
+             os.makedirs(out_dir, exist_ok=True)
+
+             try:
+                 existing_files = glob.glob(os.path.join(out_dir, "gen_*.png"))
+                 existing_files.sort(key=os.path.getmtime)
+                 while len(existing_files) > 50:
+                     os.remove(existing_files.pop(0))
+             except Exception as e:
+                 print(f"Warning: Failed to cleanup output dir: {e}")
+
+             final_results = []
+             for img in results:
+                 if not isinstance(img, Image.Image):
+                     final_results.append(img)
+                     continue
+
+                 metadata = PngImagePlugin.PngInfo()
+                 params_string = img.info.get("parameters", "")
+                 if params_string:
+                     metadata.add_text("parameters", params_string)
+                 metadata.add_text("prompt", prompt_json)
+
+                 filename = f"gen_{random.randint(1000000, 9999999)}.png"
+                 filepath = os.path.join(out_dir, filename)
+                 img.save(filepath, "PNG", pnginfo=metadata)
+                 final_results.append(filepath)
+
+             results = final_results
+
          finally:
              for temp_file in temp_files_to_clean:
                  if temp_file and os.path.exists(temp_file):
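The pipeline now embeds the assembled workflow into each saved PNG as a "prompt" text chunk, alongside any existing "parameters" string, so the graph can be recovered from an output file with Pillow. A minimal sketch; the file path below is a placeholder, not something produced by this commit:

    import json
    from PIL import Image

    img = Image.open("output/gen_1234567.png")   # placeholder path to a saved result
    params = img.info.get("parameters", "")       # generation-parameter string, if one was carried over
    workflow = json.loads(img.info["prompt"])     # the graph written via img.save(..., pnginfo=metadata)
    print(len(workflow), "nodes in the embedded workflow")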
core/pipelines/workflow_recipes/_partials/conditioning/z-image.yaml CHANGED
@@ -45,6 +45,19 @@ connections:
    - from: "vae_loader:0"
      to: "vae_encode:vae"

+ dynamic_lora_chains:
+   lora_chain:
+     template: "LoraLoader"
+     output_map:
+       "unet_loader:0": "model"
+       "clip_loader:0": "clip"
+     input_map:
+       "model": "model"
+       "clip": "clip"
+     end_input_map:
+       "model": ["model_sampler:model"]
+       "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
  dynamic_diffsynth_controlnet_chains:
    diffsynth_controlnet_chain:
      template: "QwenImageDiffsynthControlnet"
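Assuming the assembler's LoraLoader template follows the ComfyUI API node format (a class_type plus an inputs dict), two entries in the pipeline's lora_chain combined with the dynamic_lora_chains recipe above would produce two chained nodes roughly like the sketch below; the node ids and filenames are made-up placeholders:

    # Illustrative result of injecting two LoRAs with the recipe above.
    injected_nodes = {
        "101": {"class_type": "LoraLoader",
                "inputs": {"model": ["<unet_loader id>", 0], "clip": ["<clip_loader id>", 0],
                           "lora_name": "first.safetensors", "strength_model": 0.8, "strength_clip": 0.8}},
        "102": {"class_type": "LoraLoader",
                "inputs": {"model": ["101", 0], "clip": ["101", 1],
                           "lora_name": "second.safetensors", "strength_model": 0.6, "strength_clip": 0.6}},
    }
    # end_input_map then re-points model_sampler's "model" input to ["102", 0] and the
    # "clip" input of pos_prompt / neg_prompt to ["102", 1].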
requirements.txt CHANGED
@@ -1,5 +1,5 @@
- comfyui-frontend-package==1.41.20
- comfyui-workflow-templates==0.9.21
+ comfyui-frontend-package==1.42.10
+ comfyui-workflow-templates==0.9.47
  comfyui-embedded-docs==0.4.3
  torch
  torchsde
yaml/injectors.yaml CHANGED
@@ -1,4 +1,6 @@
  injector_definitions:
+   dynamic_lora_chains:
+     module: "chain_injectors.lora_injector"
    dynamic_controlnet_chains:
      module: "chain_injectors.controlnet_injector"
    dynamic_diffsynth_controlnet_chains:
@@ -7,6 +9,7 @@ injector_definitions:
      module: "chain_injectors.conditioning_injector"

  injector_order:
+   - dynamic_lora_chains
    - dynamic_conditioning_chains
    - dynamic_diffsynth_controlnet_chains
    - dynamic_controlnet_chains