SynLayers committed on
Commit
7be8536
·
1 Parent(s): a9d3a48

change path

Browse files
app.py CHANGED
@@ -34,6 +34,7 @@ if str(PROJECT_ROOT) not in sys.path:
34
 
35
  from demo.real_world_pipeline import ( # noqa: E402
36
  DEFAULT_BBOX_MODEL,
 
37
  DEFAULT_REAL_CONFIG_PATH,
38
  DEFAULT_RUN_NAME,
39
  DEFAULT_WORK_DIR,
@@ -43,7 +44,7 @@ from demo.real_world_pipeline import ( # noqa: E402
43
  DEFAULT_EXAMPLE_DIR = Path(
44
  os.environ.get(
45
  "SYNLAYERS_EXAMPLE_DIR",
46
- "/project/llmsvgen/share/data/kmw_layered_dataset/real_world_inference/layers_real_test_1024",
47
  )
48
  )
49
 
@@ -106,7 +107,7 @@ def is_zero_gpu_space() -> bool:
106
  def get_runtime_status_markdown() -> str:
107
  accelerator = os.environ.get("ACCELERATOR", "unknown")
108
  space_id = os.environ.get("SPACE_ID", "local")
109
- model_repo = os.environ.get("SYNLAYERS_MODEL_REPO", "(unset)")
110
  zero_gpu_enabled = is_zero_gpu_space()
111
 
112
  lines = ["## Runtime Status", f"- `SPACE_ID`: `{space_id}`", f"- `ACCELERATOR`: `{accelerator}`"]
 
34
 
35
  from demo.real_world_pipeline import ( # noqa: E402
36
  DEFAULT_BBOX_MODEL,
37
+ DEFAULT_MODEL_REPO_ID,
38
  DEFAULT_REAL_CONFIG_PATH,
39
  DEFAULT_RUN_NAME,
40
  DEFAULT_WORK_DIR,
 
44
  DEFAULT_EXAMPLE_DIR = Path(
45
  os.environ.get(
46
  "SYNLAYERS_EXAMPLE_DIR",
47
+ str(PROJECT_ROOT / "demo" / "examples"),
48
  )
49
  )
50
 
 
107
  def get_runtime_status_markdown() -> str:
108
  accelerator = os.environ.get("ACCELERATOR", "unknown")
109
  space_id = os.environ.get("SPACE_ID", "local")
110
+ model_repo = os.environ.get("SYNLAYERS_MODEL_REPO") or DEFAULT_MODEL_REPO_ID
111
  zero_gpu_enabled = is_zero_gpu_space()
112
 
113
  lines = ["## Runtime Status", f"- `SPACE_ID`: `{space_id}`", f"- `ACCELERATOR`: `{accelerator}`"]
demo/hf_repo_assets.py CHANGED
@@ -6,9 +6,11 @@ from pathlib import Path
6
 
7
  from huggingface_hub import snapshot_download
8
 
 
 
9
 
10
  def get_model_repo_id() -> str | None:
11
- return os.environ.get("SYNLAYERS_MODEL_REPO")
12
 
13
 
14
  def get_cache_dir() -> str | None:
 
6
 
7
  from huggingface_hub import snapshot_download
8
 
9
+ DEFAULT_MODEL_REPO_ID = "SynLayers/Bbox-caption-8b"
10
+
11
 
12
  def get_model_repo_id() -> str | None:
13
+ return os.environ.get("SYNLAYERS_MODEL_REPO") or DEFAULT_MODEL_REPO_ID
14
 
15
 
16
  def get_cache_dir() -> str | None:
demo/infer/run_caption_bbox_infer.py CHANGED
@@ -21,20 +21,14 @@ except ImportError:
21
  )
22
 
23
  PROJECT_ROOT = Path(__file__).resolve().parents[2]
 
24
 
25
 
26
  def resolve_default_bbox_model() -> str:
27
  env_path = os.environ.get("SYNLAYERS_BBOX_MODEL") or os.environ.get("SYNLAYERS_MODEL_REPO")
28
- candidates = [
29
- Path(env_path) if env_path else None,
30
- PROJECT_ROOT if (PROJECT_ROOT / "config.json").exists() and (PROJECT_ROOT / "tokenizer_config.json").exists() else None,
31
- PROJECT_ROOT / "Bbox-caption-8b",
32
- Path("/project/llmsvgen/share/data/kmw_layered_checkpoint/Bbox-caption-8b"),
33
- ]
34
- for candidate in candidates:
35
- if candidate and candidate.exists():
36
- return str(candidate)
37
- return str(Path("/project/llmsvgen/share/data/kmw_layered_checkpoint/Bbox-caption-8b"))
38
 
39
 
40
  CAPTION_BBOX_PROMPT_TOP_LEFT = (
 
21
  )
22
 
23
  PROJECT_ROOT = Path(__file__).resolve().parents[2]
24
+ DEFAULT_MODEL_REPO_ID = "SynLayers/Bbox-caption-8b"
25
 
26
 
27
  def resolve_default_bbox_model() -> str:
28
  env_path = os.environ.get("SYNLAYERS_BBOX_MODEL") or os.environ.get("SYNLAYERS_MODEL_REPO")
29
+ if env_path and Path(env_path).exists():
30
+ return str(Path(env_path))
31
+ return env_path or DEFAULT_MODEL_REPO_ID
 
 
 
 
 
 
 
32
 
33
 
34
  CAPTION_BBOX_PROMPT_TOP_LEFT = (
demo/real_world_pipeline.py CHANGED
@@ -35,6 +35,7 @@ DEFAULT_REAL_CONFIG_PATH = PROJECT_ROOT / "infer" / "infer.yaml"
35
  DEFAULT_WORK_DIR = PROJECT_ROOT / "demo" / "outputs" / "real_world_demo"
36
  DEFAULT_RUN_NAME = "step_120000"
37
  DEFAULT_TARGET_SIZE = 1024
 
38
 
39
  _BBOX_CACHE: dict[str, object] = {"model_path": None, "model": None, "processor": None}
40
  _REAL_CACHE: dict[str, object] = {"key": None, "pipeline": None, "transp_vae": None}
@@ -59,9 +60,8 @@ DEFAULT_DECOMP_CKPT_ROOT = Path(
59
  resolve_existing_path(
60
  os.environ.get("SYNLAYERS_DECOMP_CKPT_ROOT"),
61
  PROJECT_ROOT / "SynLayers_ckpt" / "step_120000",
62
- "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_ckpt/step_120000",
63
  )
64
- or "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_ckpt/step_120000"
65
  )
66
 
67
 
@@ -148,7 +148,8 @@ def build_runtime_config(
148
  seed: int | None = None,
149
  ) -> dict:
150
  config = load_config(str(config_path))
151
- repo_overrides = build_repo_asset_overrides(os.environ.get("SYNLAYERS_MODEL_REPO"))
 
152
  decomp_ckpt_root = Path(
153
  os.environ.get("SYNLAYERS_DECOMP_CKPT_ROOT")
154
  or repo_overrides.get("decomp_ckpt_root")
 
35
  DEFAULT_WORK_DIR = PROJECT_ROOT / "demo" / "outputs" / "real_world_demo"
36
  DEFAULT_RUN_NAME = "step_120000"
37
  DEFAULT_TARGET_SIZE = 1024
38
+ DEFAULT_MODEL_REPO_ID = "SynLayers/Bbox-caption-8b"
39
 
40
  _BBOX_CACHE: dict[str, object] = {"model_path": None, "model": None, "processor": None}
41
  _REAL_CACHE: dict[str, object] = {"key": None, "pipeline": None, "transp_vae": None}
 
60
  resolve_existing_path(
61
  os.environ.get("SYNLAYERS_DECOMP_CKPT_ROOT"),
62
  PROJECT_ROOT / "SynLayers_ckpt" / "step_120000",
 
63
  )
64
+ or PROJECT_ROOT / "SynLayers_ckpt" / "step_120000"
65
  )
66
 
67
 
 
148
  seed: int | None = None,
149
  ) -> dict:
150
  config = load_config(str(config_path))
151
+ model_repo = os.environ.get("SYNLAYERS_MODEL_REPO") or DEFAULT_MODEL_REPO_ID
152
+ repo_overrides = build_repo_asset_overrides(model_repo)
153
  decomp_ckpt_root = Path(
154
  os.environ.get("SYNLAYERS_DECOMP_CKPT_ROOT")
155
  or repo_overrides.get("decomp_ckpt_root")
demo/upload_used_bundle_to_hf.py CHANGED
@@ -7,7 +7,7 @@ from pathlib import Path
7
  from huggingface_hub import HfApi
8
 
9
  PROJECT_ROOT = Path(__file__).resolve().parents[1]
10
- SHARE_ROOT = Path("/project/llmsvgen/share/data/kmw_layered_checkpoint")
11
 
12
  USED_FILE_MAP = {
13
  PROJECT_ROOT / "demo" / "model_card.md": "README.md",
@@ -47,9 +47,9 @@ REQUIRED_ASSET_FILES = {
47
  }
48
 
49
  REQUIRED_ASSET_FOLDERS = {
50
- SHARE_ROOT / "SynLayers_checkpoints" / "FLUX.1-dev": "SynLayers_checkpoints/FLUX.1-dev",
51
- SHARE_ROOT / "SynLayers_ckpt" / "step_120000": "SynLayers_ckpt/step_120000",
52
- SHARE_ROOT / "SynLayers_checkpoints" / "FLUX.1-dev-Controlnet-Inpainting-Alpha": "SynLayers_checkpoints/FLUX.1-dev-Controlnet-Inpainting-Alpha",
53
  }
54
 
55
 
 
7
  from huggingface_hub import HfApi
8
 
9
  PROJECT_ROOT = Path(__file__).resolve().parents[1]
10
+ ASSET_ROOT = Path(os.environ.get("SYNLAYERS_UPLOAD_ASSET_ROOT", PROJECT_ROOT))
11
 
12
  USED_FILE_MAP = {
13
  PROJECT_ROOT / "demo" / "model_card.md": "README.md",
 
47
  }
48
 
49
  REQUIRED_ASSET_FOLDERS = {
50
+ ASSET_ROOT / "SynLayers_checkpoints" / "FLUX.1-dev": "SynLayers_checkpoints/FLUX.1-dev",
51
+ ASSET_ROOT / "SynLayers_ckpt" / "step_120000": "SynLayers_ckpt/step_120000",
52
+ ASSET_ROOT / "SynLayers_checkpoints" / "FLUX.1-dev-Controlnet-Inpainting-Alpha": "SynLayers_checkpoints/FLUX.1-dev-Controlnet-Inpainting-Alpha",
53
  }
54
 
55
 
infer/infer.yaml CHANGED
@@ -6,56 +6,33 @@ max_layer_num: 52
6
  source_size: 1024
7
  target_size: 1024
8
 
9
- # Real-world inference defaults
10
- data_dir: "/project/llmsvgen/share/data/kmw_layered_dataset/real_world_inference"
11
- image_dir: "/project/llmsvgen/share/data/kmw_layered_dataset/real_world_inference/layers_real_test_1024"
12
- test_jsonl: "/project/llmsvgen/share/data/kmw_layered_dataset/real_world_inference/caption_bbox_infer.jsonl"
 
 
13
 
14
  # Model paths
15
- pretrained_model_name_or_path: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_checkpoints/FLUX.1-dev"
16
- pretrained_adapter_path: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_checkpoints/FLUX.1-dev-Controlnet-Inpainting-Alpha"
17
  transp_vae_path: "ckpt/trans_vae/0008000.pt"
18
 
19
  # Pre-trained LoRA weights
20
  pretrained_lora_dir: "ckpt/pre_trained_LoRA"
21
  artplus_lora_dir: "ckpt/prism_ft_LoRA"
22
 
23
- # below is for 18k dataset
24
- #lora_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_18k/step_90000/transformer"
25
- #layer_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_18k/step_90000"
26
- #adapter_lora_dir: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_18k/step_90000/adapter"
27
-
28
- # below is for 20k dataset
29
- #lora_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_20k/step_120000/transformer"
30
- #layer_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_20k/step_120000"
31
- #adapter_lora_dir: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_20k/step_120000/adapter"
32
-
33
- # below is for 30k dataset
34
- #lora_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_30k/step_150000/transformer"
35
- #layer_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_30k/step_150000"
36
- #adapter_lora_dir: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_30k/step_150000/adapter"
37
-
38
- # below is for 40k dataset
39
- #lora_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_40k/step_250000/transformer"
40
- #layer_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_40k/step_250000"
41
- #adapter_lora_dir: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_40k/step_250000/adapter"
42
-
43
- # below is for 50k dataset
44
- #lora_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_50k/step_200000/transformer"
45
- #layer_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_50k/step_200000"
46
- #adapter_lora_dir: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_train_dataset/ckpt_prism_scaleup_1024_50k/step_200000/adapter"
47
-
48
  # unified real-world decomposition checkpoint
49
- lora_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_ckpt/step_120000/transformer"
50
- layer_ckpt: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_ckpt/step_120000"
51
- adapter_lora_dir: "/project/llmsvgen/share/data/kmw_layered_checkpoint/SynLayers_ckpt/step_120000/adapter"
52
 
53
  # Inference settings
54
  cfg: 4.0
55
  adapter_scale: 0.9
56
  max_sequence_length: 1024
57
 
58
- save_dir: "/project/llmsvgen/share/data/kmw_layered_dataset/real_world_inference/results"
59
  #run_name: "step_120000" # optional manual override
60
 
61
  # Sample range control (1-based indexing)
 
6
  source_size: 1024
7
  target_size: 1024
8
 
9
+ # Real-world inference defaults. The Gradio demo and unified CLI override these
10
+ # paths per uploaded image; direct `infer/infer.py` runs can point them to a
11
+ # prepared local JSONL/image directory.
12
+ data_dir: "demo/outputs/real_world_demo"
13
+ image_dir: "demo/outputs/real_world_demo/layers_real_test_1024"
14
+ test_jsonl: "demo/outputs/real_world_demo/caption_bbox_infer.jsonl"
15
 
16
  # Model paths
17
+ pretrained_model_name_or_path: "SynLayers_checkpoints/FLUX.1-dev"
18
+ pretrained_adapter_path: "SynLayers_checkpoints/FLUX.1-dev-Controlnet-Inpainting-Alpha"
19
  transp_vae_path: "ckpt/trans_vae/0008000.pt"
20
 
21
  # Pre-trained LoRA weights
22
  pretrained_lora_dir: "ckpt/pre_trained_LoRA"
23
  artplus_lora_dir: "ckpt/prism_ft_LoRA"
24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  # unified real-world decomposition checkpoint
26
+ lora_ckpt: "SynLayers_ckpt/step_120000/transformer"
27
+ layer_ckpt: "SynLayers_ckpt/step_120000"
28
+ adapter_lora_dir: "SynLayers_ckpt/step_120000/adapter"
29
 
30
  # Inference settings
31
  cfg: 4.0
32
  adapter_scale: 0.9
33
  max_sequence_length: 1024
34
 
35
+ save_dir: "demo/outputs/real_world_infer/results"
36
  #run_name: "step_120000" # optional manual override
37
 
38
  # Sample range control (1-based indexing)