Update README.md
Browse files
README.md
CHANGED
|
@@ -61,6 +61,8 @@ Text-to-image diffusion models have achieved remarkable progress in recent years
|
|
| 61 |
+ transformer = FluxTransformer2DModel.from_pretrained(bfl_repo, subfolder="transformer", torch_dtype=torch.bfloat16)
|
| 62 |
- pipe = FluxPipeline.from_pretrained(bfl_repo, torch_dtype=torch.bfloat16)
|
| 63 |
+ pipe = FluxPipeline.from_pretrained(bfl_repo, transformer=transformer, torch_dtype=torch.bfloat16)
|
|
|
|
|
|
|
| 64 |
pipe.enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power
|
| 65 |
|
| 66 |
+ pipe.load_lora_weights("Huage001/URAE", weight_name="urae_2k_adapter.safetensors")
|
|
|
|
| 61 |
+ transformer = FluxTransformer2DModel.from_pretrained(bfl_repo, subfolder="transformer", torch_dtype=torch.bfloat16)
|
| 62 |
- pipe = FluxPipeline.from_pretrained(bfl_repo, torch_dtype=torch.bfloat16)
|
| 63 |
+ pipe = FluxPipeline.from_pretrained(bfl_repo, transformer=transformer, torch_dtype=torch.bfloat16)
|
| 64 |
+
+ pipe.scheduler.config.use_dynamic_shifting = False
|
| 65 |
+
+ pipe.scheduler.config.time_shift = 10
|
| 66 |
pipe.enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power
|
| 67 |
|
| 68 |
+ pipe.load_lora_weights("Huage001/URAE", weight_name="urae_2k_adapter.safetensors")
|