Tags: Image-Text-to-Text · Transformers · Safetensors · English · internvl · vision-language-model · vlm · reasoning · perception · rlvr · grpo · icml-2026 · conversational
Instructions to use UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
pipe(text=messages)
```

```python
# Load model directly
from transformers import AutoProcessor, AutoModelForImageTextToText

processor = AutoProcessor.from_pretrained("UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged")
model = AutoModelForImageTextToText.from_pretrained("UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```
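The snippets above load the checkpoint in its default precision. Since the config.json further down stores the weights in bfloat16, a lower-memory variant is to pass that dtype and let the weights be placed automatically across available GPUs. This is a sketch rather than part of the generated instructions; it assumes a CUDA machine with the accelerate package installed.

```python
# Sketch: load in bfloat16 with automatic device placement (assumes `accelerate` is installed).
import torch
from transformers import AutoProcessor, AutoModelForImageTextToText

model_id = "UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForImageTextToText.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches the "dtype": "bfloat16" entry in config.json
    device_map="auto",           # spreads layers across available GPUs via accelerate
)
```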
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged with vLLM:
Install from pip and serve the model
```shell
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged",
    "messages": [
      {
        "role": "user",
        "content": [
          {"type": "text", "text": "Describe this image in one sentence."},
          {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}}
        ]
      }
    ]
  }'
```
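Because the vLLM server exposes an OpenAI-compatible API, the curl call above can equally be issued from Python. A minimal sketch, assuming `pip install openai` and the server from the command above running on localhost:8000:

```python
# Sketch: query the vLLM OpenAI-compatible endpoint with the `openai` client.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")  # vLLM does not require a real key by default
response = client.chat.completions.create(
    model="UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image in one sentence."},
                {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
            ],
        }
    ],
)
print(response.choices[0].message.content)
```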
Use Docker
```shell
docker model run hf.co/UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged
```
- SGLang
How to use UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged with SGLang:
Install from pip and serve the model
```shell
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged",
    "messages": [
      {
        "role": "user",
        "content": [
          {"type": "text", "text": "Describe this image in one sentence."},
          {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}}
        ]
      }
    ]
  }'
```
Use Docker images
```shell
docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
  --model-path "UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged",
    "messages": [
      {
        "role": "user",
        "content": [
          {"type": "text", "text": "Describe this image in one sentence."},
          {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}}
        ]
      }
    ]
  }'
```
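The SGLang server (started from either the pip or the Docker command above) speaks the same OpenAI-compatible protocol on port 30000, so the request can also be sent from Python with plain `requests`. A minimal sketch under that assumption:

```python
# Sketch: POST the same chat-completions payload to the local SGLang server with `requests`.
import requests

payload = {
    "model": "UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged",
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image in one sentence."},
                {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
            ],
        }
    ],
}
resp = requests.post("http://localhost:30000/v1/chat/completions", json=payload, timeout=120)
print(resp.json()["choices"][0]["message"]["content"])
```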
- Docker Model Runner
How to use UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged with Docker Model Runner:
```shell
docker model run hf.co/UCSC-VLAA/VLM-CapCurriculum-InternVL3.5-8B-Staged
```
The model's config.json (3,032 bytes, revision 5ddccd3):
```json
{
"architectures": [
"InternVLForConditionalGeneration"
],
"downsample_ratio": 0.5,
"dtype": "bfloat16",
"eos_token_id": 151645,
"image_seq_length": 256,
"image_token_id": 151671,
"model_type": "internvl",
"pad_token_id": 151643,
"projector_hidden_act": "gelu",
"text_config": {
"_name_or_path": "/root/codespace/checkpoints/Qwen3-8B",
"architectures": [
"Qwen3ForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 151643,
"debug": false,
"dtype": "bfloat16",
"eos_token_id": 151645,
"ep_size": 1,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 4096,
"initializer_range": 0.02,
"intermediate_size": 12288,
"layer_types": [
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention"
],
"max_position_embeddings": 40960,
"max_window_layers": 36,
"micro_forward": false,
"model_type": "qwen3",
"num_attention_heads": 32,
"num_hidden_layers": 36,
"num_key_value_heads": 8,
"rms_norm_eps": 1e-06,
"rope_scaling": null,
"rope_theta": 1000000,
"skip_checkpoint": false,
"sliding_window": null,
"use_cache": true,
"use_deepep": false,
"use_sliding_window": false,
"vocab_size": 151936
},
"transformers_version": "4.57.1",
"vision_config": {
"architectures": [
"InternVisionModel"
],
"attention_bias": true,
"attention_dropout": 0.0,
"dropout": 0.0,
"dtype": "bfloat16",
"hidden_act": "gelu",
"hidden_dropout_prob": 0.0,
"hidden_size": 1024,
"image_size": [
448,
448
],
"initializer_factor": 0.1,
"initializer_range": 1e-10,
"intermediate_size": 4096,
"layer_norm_eps": 1e-06,
"layer_scale_init_value": 0.1,
"model_type": "internvl_vision",
"norm_type": "layer_norm",
"num_attention_heads": 16,
"num_channels": 3,
"num_hidden_layers": 24,
"patch_size": [
14,
14
],
"projection_dropout": 0.0,
"use_absolute_position_embeddings": true,
"use_mask_token": false,
"use_mean_pooling": true,
"use_qk_norm": false
},
"vision_feature_layer": -1,
"vision_feature_select_strategy": "default"
}
```
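As a sanity check on these values, the `image_seq_length` of 256 follows from the vision settings: a 448×448 tile cut into 14×14 patches yields 32×32 = 1024 patches, and the `downsample_ratio` of 0.5 is applied per spatial dimension, so 32 × 0.5 = 16 and 16² = 256 visual tokens per tile. A small sketch of that arithmetic (variable names are illustrative, not part of the config schema):

```python
import json

# Illustrative check: derive image_seq_length from the vision settings above.
with open("config.json") as f:
    cfg = json.load(f)

vision = cfg["vision_config"]
patches_per_side = vision["image_size"][0] // vision["patch_size"][0]  # 448 // 14 = 32
tokens_per_side = int(patches_per_side * cfg["downsample_ratio"])      # 32 * 0.5 = 16
assert tokens_per_side ** 2 == cfg["image_seq_length"]                 # 16 ** 2 == 256
print(tokens_per_side ** 2, cfg["image_seq_length"])                   # 256 256
```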