Upload flux_space_model_manager.py with huggingface_hub
flux_space_model_manager.py: +56 lines, −0 lines
flux_space_model_manager.py
CHANGED
|
@@ -33,6 +33,17 @@ class FluxModelManager:
|
|
| 33 |
}
|
| 34 |
}
|
| 35 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
self.current_model = None
|
| 37 |
self.current_pipeline = None
|
| 38 |
self.loaded_loras = {}
|
|
@@ -186,6 +197,51 @@ class FluxModelManager:
|
|
| 186 |
}
|
| 187 |
|
| 188 |
return result.images[0], generation_info
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 189 |
|
| 190 |
# Example usage for Gradio integration
|
| 191 |
def create_model_manager():
|
|
|
|
| 33 |
}
|
| 34 |
}
|
| 35 |
|
| 36 |
+
# Pre-loaded LoRA models from annoyingpixel
|
| 37 |
+
self.preloaded_loras = {
|
| 38 |
+
'T11-Ultra-Portrait-E04': {
|
| 39 |
+
'repo_id': 'annoyingpixel/T11-Ultra-Portrait.E04.Lora.TA',
|
| 40 |
+
'model_id': 'annoyingpixel/T11-Ultra-Portrait.E04.Lora.TA',
|
| 41 |
+
'description': 'Ultra Portrait LoRA for FLUX.1-dev',
|
| 42 |
+
'trigger_words': 'T11-Ultra-Portrait-E04',
|
| 43 |
+
'base_model': 'black-forest-labs/FLUX.1-dev'
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
self.current_model = None
|
| 48 |
self.current_pipeline = None
|
| 49 |
self.loaded_loras = {}
|
|
|
|
| 197 |
}
|
| 198 |
|
| 199 |
return result.images[0], generation_info
|
| 200 |
+
|
| 201 |
+
def load_preloaded_lora(self, lora_name: str, strength: float = 1.0) -> bool:
    """Load a pre-registered LoRA directly from its Hugging Face repository.

    Args:
        lora_name: Key into ``self.preloaded_loras`` identifying the LoRA.
        strength: Adapter strength recorded in ``self.loaded_loras``.
            NOTE(review): the strength is only stored here, not applied to
            the pipeline — presumably generation code applies it via
            ``set_adapters``; confirm against the caller.

    Returns:
        True when the LoRA weights were loaded, False on any failure
        (unknown name, no pipeline loaded, or a download/load error).
    """
    # Guard clauses: unknown LoRA name or no active pipeline.
    if lora_name not in self.preloaded_loras:
        print(f"❌ Pre-loaded LoRA '{lora_name}' not found")
        return False

    if self.current_pipeline is None:
        print("❌ No model loaded. Load a model first.")
        return False

    try:
        print(f"🔄 Loading pre-loaded LoRA: {lora_name}")
        lora_info = self.preloaded_loras[lora_name]

        # Load LoRA weights straight from the HF repository.
        # Bug fix: the original passed weight_name="default", which makes
        # diffusers look for a file literally named "default" in the repo
        # and fail; omitting weight_name lets diffusers auto-detect the
        # weight file in the repository.
        self.current_pipeline.load_lora_weights(
            lora_info['model_id'],
            adapter_name=lora_name
        )

        # Record what was loaded so generation/UI code can surface the
        # trigger words, strength and description later.
        self.loaded_loras[lora_name] = {
            'path': lora_info['model_id'],
            'strength': strength,
            'trigger_words': lora_info.get('trigger_words', ''),
            'description': lora_info['description']
        }

        print(f"✅ Pre-loaded LoRA '{lora_name}' loaded with strength {strength}")
        print(f"📝 Trigger words: {lora_info.get('trigger_words', 'None')}")
        return True

    except Exception as e:
        # Broad catch is deliberate: any failure in the download/load path
        # is reported on the console and signalled via the return value
        # instead of crashing the Gradio app.
        print(f"❌ Error loading pre-loaded LoRA: {e}")
        return False
|
| 239 |
+
|
| 240 |
+
def get_preloaded_loras(self) -> Dict:
    """Return the registry of bundled LoRA models.

    The mapping is keyed by LoRA name; each entry carries the repo/model
    ids, a description, trigger words and the base model it targets.
    """
    return self.preloaded_loras
|
| 245 |
|
| 246 |
# Example usage for Gradio integration
|
| 247 |
def create_model_manager():
|