# Main real-world inference configuration
---
seed: 42
max_layer_num: 52

# Size configuration
source_size: 1024
target_size: 1024

# Real-world inference defaults. The Gradio demo and unified CLI override these
# paths per uploaded image; direct `infer/infer.py` runs can point them to a
# prepared local JSONL/image directory.
data_dir: "demo/outputs/real_world_demo"
image_dir: "demo/outputs/real_world_demo/layers_real_test_1024"
test_jsonl: "demo/outputs/real_world_demo/caption_bbox_infer.jsonl"

# Model paths
pretrained_model_name_or_path: "SynLayers_checkpoints/FLUX.1-dev"
pretrained_adapter_path: "SynLayers_checkpoints/FLUX.1-dev-Controlnet-Inpainting-Alpha"
transp_vae_path: "ckpt/trans_vae/0008000.pt"

# Pre-trained LoRA weights
pretrained_lora_dir: "ckpt/pre_trained_LoRA"
artplus_lora_dir: "ckpt/prism_ft_LoRA"

# Unified real-world decomposition checkpoint
# NOTE(review): these use "SynLayers_ckpt" while the model paths above use
# "SynLayers_checkpoints" — presumably two distinct directories; confirm
# against the actual checkpoint layout.
lora_ckpt: "SynLayers_ckpt/step_120000/transformer"
layer_ckpt: "SynLayers_ckpt/step_120000"
adapter_lora_dir: "SynLayers_ckpt/step_120000/adapter"

# Inference settings
cfg: 4.0
adapter_scale: 0.9
max_sequence_length: 1024
save_dir: "demo/outputs/real_world_infer/results"
# run_name: "step_120000"  # optional manual override

# Sample range control (1-based indexing)
start_idx: 1
# end_idx: 147
# max_samples: 147