Image-Text-to-Text
Transformers
Diffusers
Safetensors
qwen3_vl
vision-language-model
image-decomposition
conversational
Instructions for using SynLayers/Bbox-caption-8b with libraries, inference providers, notebooks, and local apps. Follow the sections below to get started.
- Libraries
- Transformers
How to use SynLayers/Bbox-caption-8b with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="SynLayers/Bbox-caption-8b")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
pipe(text=messages)
```

```python
# Load model directly
from transformers import AutoProcessor, AutoModelForImageTextToText

processor = AutoProcessor.from_pretrained("SynLayers/Bbox-caption-8b")
model = AutoModelForImageTextToText.from_pretrained("SynLayers/Bbox-caption-8b")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```
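Both snippets above fetch the example image by URL. For a local file, one option is to render the chat template to a prompt string and hand a PIL image to the processor directly. A minimal sketch, assuming a hypothetical local `layout.png`; the prompt wording and the model's bbox output format are assumptions, so check the model card for the exact prompt it was trained on:

```python
from PIL import Image
from transformers import AutoProcessor, AutoModelForImageTextToText

processor = AutoProcessor.from_pretrained("SynLayers/Bbox-caption-8b")
model = AutoModelForImageTextToText.from_pretrained("SynLayers/Bbox-caption-8b")

image = Image.open("layout.png")  # hypothetical local file
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Caption each region and give its bounding box."},  # assumed prompt
        ],
    },
]
# Render the prompt with an image placeholder, then pass the image separately.
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device)

outputs = model.generate(**inputs, max_new_tokens=128)
print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```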
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use SynLayers/Bbox-caption-8b with vLLM:
Install from pip and serve the model
```shell
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "SynLayers/Bbox-caption-8b"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "SynLayers/Bbox-caption-8b",
    "messages": [
      {
        "role": "user",
        "content": [
          {
            "type": "text",
            "text": "Describe this image in one sentence."
          },
          {
            "type": "image_url",
            "image_url": {
              "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
            }
          }
        ]
      }
    ]
  }'
```
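Because the server speaks the OpenAI chat-completions protocol, the same request can be made from Python with the official `openai` client. A minimal sketch against the server started above; the `api_key` value is a placeholder, since vLLM does not check it by default:

```python
from openai import OpenAI

# Point the client at the local vLLM server instead of api.openai.com.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="SynLayers/Bbox-caption-8b",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image in one sentence."},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"},
                },
            ],
        }
    ],
)
print(response.choices[0].message.content)
```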
Use Docker

```shell
docker model run hf.co/SynLayers/Bbox-caption-8b
```
- SGLang
How to use SynLayers/Bbox-caption-8b with SGLang:
Install from pip and serve the model
```shell
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "SynLayers/Bbox-caption-8b" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "SynLayers/Bbox-caption-8b",
    "messages": [
      {
        "role": "user",
        "content": [
          {
            "type": "text",
            "text": "Describe this image in one sentence."
          },
          {
            "type": "image_url",
            "image_url": {
              "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
            }
          }
        ]
      }
    ]
  }'
```
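SGLang exposes the same OpenAI-compatible endpoint on port 30000, so a plain `requests` call works as well. A short sketch mirroring the curl command above:

```python
import requests

payload = {
    "model": "SynLayers/Bbox-caption-8b",
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image in one sentence."},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"},
                },
            ],
        }
    ],
}

# POST to the local SGLang server and print the assistant reply.
resp = requests.post("http://localhost:30000/v1/chat/completions", json=payload, timeout=120)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```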
Use Docker images

```shell
docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
  --model-path "SynLayers/Bbox-caption-8b" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "SynLayers/Bbox-caption-8b",
    "messages": [
      {
        "role": "user",
        "content": [
          {
            "type": "text",
            "text": "Describe this image in one sentence."
          },
          {
            "type": "image_url",
            "image_url": {
              "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
            }
          }
        ]
      }
    ]
  }'
```

- Docker Model Runner
How to use SynLayers/Bbox-caption-8b with Docker Model Runner:
```shell
docker model run hf.co/SynLayers/Bbox-caption-8b
```
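For a quick smoke test without the interactive session, Docker Model Runner also accepts a one-shot prompt as a trailing argument. This is the generic `docker model run MODEL [PROMPT]` form and is text-only; how the CLI handles vision input is an assumption worth verifying with `docker model run --help`:

```shell
# One-shot prompt instead of an interactive chat (hypothetical prompt text)
docker model run hf.co/SynLayers/Bbox-caption-8b "Describe the main objects and their rough bounding boxes."
```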
Upload demo/real_world_pipeline.py with huggingface_hub
demo/real_world_pipeline.py (+21 -4):

```diff
@@ -23,6 +23,7 @@ from demo.infer.run_caption_bbox_infer import (  # noqa: E402
     draw_boxes,
     infer_caption_bbox,
 )
+from demo.hf_repo_assets import build_repo_asset_overrides  # noqa: E402
 from demo.infer.vlm_bbox_inference import get_model_and_processor  # noqa: E402
 from infer.common_infer import initialize_pipeline  # noqa: E402
 from infer.infer import build_run_save_dir, get_real_boxes, load_adapter_image  # noqa: E402
@@ -133,37 +134,48 @@ def build_runtime_config(
     seed: int | None = None,
 ) -> dict:
     config = load_config(str(config_path))
+    repo_overrides = build_repo_asset_overrides(os.environ.get("SYNLAYERS_MODEL_REPO"))
+    decomp_ckpt_root = Path(
+        os.environ.get("SYNLAYERS_DECOMP_CKPT_ROOT")
+        or repo_overrides.get("decomp_ckpt_root")
+        or DEFAULT_DECOMP_CKPT_ROOT
+    )
     config["data_dir"] = str(image_dir.parent)
     config["image_dir"] = str(image_dir)
     config["test_jsonl"] = str(bbox_jsonl)
     config["save_dir"] = str(results_root)
     config["run_name"] = run_name
-    config["lora_ckpt"] = str(
-    config["layer_ckpt"] = str(
-    config["adapter_lora_dir"] = str(
+    config["lora_ckpt"] = str(decomp_ckpt_root / "transformer")
+    config["layer_ckpt"] = str(decomp_ckpt_root)
+    config["adapter_lora_dir"] = str(decomp_ckpt_root / "adapter")
 
     env_overrides = {
         "pretrained_model_name_or_path": (
             os.environ.get("SYNLAYERS_BASE_MODEL")
+            or repo_overrides.get("pretrained_model_name_or_path")
             or resolve_existing_path(PROJECT_ROOT / "SynLayers_checkpoints" / "FLUX.1-dev")
             or "black-forest-labs/FLUX.1-dev"
         ),
         "pretrained_adapter_path": (
             os.environ.get("SYNLAYERS_ADAPTER_MODEL")
+            or repo_overrides.get("pretrained_adapter_path")
             or resolve_existing_path(
                 PROJECT_ROOT / "SynLayers_checkpoints" / "FLUX.1-dev-Controlnet-Inpainting-Alpha"
             )
         ),
         "transp_vae_path": (
             os.environ.get("SYNLAYERS_TRANSP_VAE")
+            or repo_overrides.get("transp_vae_path")
             or resolve_existing_path(PROJECT_ROOT / "ckpt" / "trans_vae" / "0008000.pt")
         ),
         "pretrained_lora_dir": (
             os.environ.get("SYNLAYERS_PRETRAINED_LORA")
+            or repo_overrides.get("pretrained_lora_dir")
             or resolve_existing_path(PROJECT_ROOT / "ckpt" / "pre_trained_LoRA")
         ),
         "artplus_lora_dir": (
             os.environ.get("SYNLAYERS_ARTPLUS_LORA")
+            or repo_overrides.get("artplus_lora_dir")
             or resolve_existing_path(PROJECT_ROOT / "ckpt" / "prism_ft_LoRA")
         ),
     }
@@ -338,7 +350,12 @@ def run_real_world_pipeline(
     if not image_path.exists():
         raise FileNotFoundError(f"Input image not found: {image_path}")
 
-    bbox_model =
+    bbox_model = (
+        bbox_model
+        or os.environ.get("SYNLAYERS_BBOX_MODEL")
+        or os.environ.get("SYNLAYERS_MODEL_REPO")
+        or DEFAULT_BBOX_MODEL
+    )
     config_path = Path(config_path or os.environ.get("SYNLAYERS_REAL_CONFIG", str(DEFAULT_REAL_CONFIG_PATH)))
     work_dir = Path(work_dir or os.environ.get("SYNLAYERS_DEMO_WORK_DIR", str(DEFAULT_WORK_DIR)))
```
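The diff resolves every asset through the same three-step fallback: an explicit `SYNLAYERS_*` environment variable, then an override derived from the repo named by `SYNLAYERS_MODEL_REPO`, then a bundled default. A minimal standalone sketch of that pattern; `DEFAULT_DECOMP_CKPT_ROOT` here is a hypothetical stand-in for the module-level default in the actual script:

```python
import os
from pathlib import Path

# Hypothetical stand-in for the module-level default used in the diff.
DEFAULT_DECOMP_CKPT_ROOT = Path("ckpt/decomp")


def resolve_decomp_root(repo_overrides: dict) -> Path:
    """First match wins: env var, repo-derived override, local default."""
    return Path(
        os.environ.get("SYNLAYERS_DECOMP_CKPT_ROOT")
        or repo_overrides.get("decomp_ckpt_root")
        or DEFAULT_DECOMP_CKPT_ROOT
    )


# With no env var set and no override, the bundled default wins.
print(resolve_decomp_root({}))                                   # ckpt/decomp
print(resolve_decomp_root({"decomp_ckpt_root": "/mnt/decomp"}))  # /mnt/decomp
```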