Value error, Model architectures ['Qwen3_5ForConditionalGeneration'] are not supported for now. Transformers version 5.3.0.dev0
python -c "import transformers; print(transformers.__version__)"
5.3.0.dev0
print("Initializing vLLM Engine...")
# Build the engine configuration.
# NOTE(review): assumes MODEL_PATH was defined in an earlier cell and points to a
# checkpoint whose architecture is registered in this vLLM build — the traceback
# below shows this build rejecting 'Qwen3_5ForConditionalGeneration'.
engine_args = AsyncEngineArgs(
model=MODEL_PATH,  # local path or HF repo id of the model weights
tokenizer=MODEL_PATH,  # reuse the model directory for the tokenizer files
trust_remote_code=True,  # allow custom modeling/tokenizer code shipped with the checkpoint
tensor_parallel_size=1,  # single GPU — no tensor parallelism
dtype="bfloat16",
quantization="bitsandbytes",  # load weights via bitsandbytes quantization
max_model_len=8192,  # cap context length to bound KV-cache memory
gpu_memory_utilization=0.85,  # fraction of GPU memory vLLM may claim
enforce_eager=False,  # keep CUDA-graph capture enabled (eager mode off)
max_num_seqs=4,  # max concurrently scheduled sequences
)
# Constructing the engine validates the model config; it raises ValidationError
# if the checkpoint's architecture is unsupported (see traceback below).
vllm_engine = AsyncLLMEngine.from_engine_args(engine_args)
Initializing vLLM Engine...
ValidationError Traceback (most recent call last)
Cell In[4], line 14
1 print("Initializing vLLM Engine...")
2 engine_args = AsyncEngineArgs(
3 model=MODEL_PATH,
4 tokenizer=MODEL_PATH,
(...) 12 max_num_seqs=4,
13 )
---> 14 vllm_engine = AsyncLLMEngine.from_engine_args(engine_args)
File ~\miniconda3\envs\vllm_clean\Lib\site-packages\vllm\v1\engine\async_llm.py:231, in AsyncLLM.from_engine_args(cls, engine_args, start_engine_loop, usage_context, stat_loggers)
228 """Create an AsyncLLM from the EngineArgs."""
230 # Create the engine configs.
--> 231 vllm_config = engine_args.create_engine_config(usage_context)
232 executor_class = Executor.get_class(vllm_config)
234 # Create the AsyncLLM.
File ~\miniconda3\envs\vllm_clean\Lib\site-packages\vllm\engine\arg_utils.py:1142, in EngineArgs.create_engine_config(self, usage_context, headless)
1131 device_config = DeviceConfig(
1132 device=cast(Device, current_platform.device_type))
1134 (self.model, self.tokenizer,
1135 self.speculative_config) = maybe_override_with_speculators(
1136 model=self.model,
(...) 1140 vllm_speculative_config=self.speculative_config,
1141 )
-> 1142 model_config = self.create_model_config()
1144 # * If VLLM_USE_V1 is unset, we enable V1 for "supported features"
1145 # and fall back to V0 for experimental or unsupported features.
1146 # * If VLLM_USE_V1=1, we enable V1 for supported + experimental
1147 # features and raise error for unsupported features.
1148 # * If VLLM_USE_V1=0, we disable V1.
1149 use_v1 = False
File ~\miniconda3\envs\vllm_clean\Lib\site-packages\vllm\engine\arg_utils.py:994, in EngineArgs.create_model_config(self)
987 logger.warning(
988 "--enable-multimodal-encoder-data-parallelis deprecated " 989 "and will be removed in v0.13. " 990 "Please use--mm-encoder-tp-mode data` instead.")
992 self.mm_encoder_tp_mode = "data"
--> 994 return ModelConfig(
995 model=self.model,
996 hf_config_path=self.hf_config_path,
997 runner=self.runner,
998 convert=self.convert,
999 task=self.task,
1000 tokenizer=self.tokenizer,
1001 tokenizer_mode=self.tokenizer_mode,
1002 trust_remote_code=self.trust_remote_code,
1003 allowed_local_media_path=self.allowed_local_media_path,
1004 allowed_media_domains=self.allowed_media_domains,
1005 dtype=self.dtype,
1006 seed=self.seed,
1007 revision=self.revision,
1008 code_revision=self.code_revision,
1009 rope_scaling=self.rope_scaling,
1010 rope_theta=self.rope_theta,
1011 hf_token=self.hf_token,
1012 hf_overrides=self.hf_overrides,
1013 tokenizer_revision=self.tokenizer_revision,
1014 max_model_len=self.max_model_len,
1015 quantization=self.quantization,
1016 enforce_eager=self.enforce_eager,
1017 max_logprobs=self.max_logprobs,
1018 logprobs_mode=self.logprobs_mode,
1019 disable_sliding_window=self.disable_sliding_window,
1020 disable_cascade_attn=self.disable_cascade_attn,
1021 skip_tokenizer_init=self.skip_tokenizer_init,
1022 enable_prompt_embeds=self.enable_prompt_embeds,
1023 served_model_name=self.served_model_name,
1024 limit_mm_per_prompt=self.limit_mm_per_prompt,
1025 interleave_mm_strings=self.interleave_mm_strings,
1026 media_io_kwargs=self.media_io_kwargs,
1027 skip_mm_profiling=self.skip_mm_profiling,
1028 config_format=self.config_format,
1029 mm_processor_kwargs=self.mm_processor_kwargs,
1030 mm_processor_cache_gb=self.mm_processor_cache_gb,
1031 mm_processor_cache_type=self.mm_processor_cache_type,
1032 mm_shm_cache_max_object_size_mb=self.
1033 mm_shm_cache_max_object_size_mb,
1034 mm_encoder_tp_mode=self.mm_encoder_tp_mode,
1035 pooler_config=self.pooler_config,
1036 override_pooler_config=self.override_pooler_config,
1037 logits_processor_pattern=self.logits_processor_pattern,
1038 generation_config=self.generation_config,
1039 override_generation_config=self.override_generation_config,
1040 enable_sleep_mode=self.enable_sleep_mode,
1041 model_impl=self.model_impl,
1042 override_attention_dtype=self.override_attention_dtype,
1043 logits_processors=self.logits_processors,
1044 video_pruning_rate=self.video_pruning_rate,
1045 io_processor_plugin=self.io_processor_plugin,
1046 )
File ~\miniconda3\envs\vllm_clean\Lib\site-packages\pydantic\_internal\_dataclasses.py:121, in complete_dataclass.<locals>.__init__(dataclass_self, *args, **kwargs)
    119 __tracebackhide__ = True
    120 s = dataclass_self
--> 121 s.__pydantic_validator__.validate_python(ArgsKwargs(args, kwargs), self_instance=s)
ValidationError: 1 validation error for ModelConfig
Value error, Model architectures ['Qwen3_5ForConditionalGeneration'] are not supported for now. Supported architectures: dict_keys(['ApertusForCausalLM', 'AquilaModel', 'AquilaForCausalLM', 'ArceeForCausalLM', 'ArcticForCausalLM', 'MiniMaxForCausalLM', 'MiniMaxText01ForCausalLM', 'MiniMaxM1ForCausalLM', 'BaiChuanForCausalLM', 'BaichuanForCausalLM', 'BailingMoeForCausalLM', 'BailingMoeV2ForCausalLM', 'BambaForCausalLM', 'BloomForCausalLM', 'ChatGLMModel', 'ChatGLMForConditionalGeneration', 'CohereForCausalLM', 'Cohere2ForCausalLM', 'CwmForCausalLM', 'DbrxForCausalLM', 'DeciLMForCausalLM', 'DeepseekForCausalLM', 'DeepseekV2ForCausalLM', 'DeepseekV3ForCausalLM', 'DeepseekV32ForCausalLM', 'Dots1ForCausalLM', 'Ernie4_5ForCausalLM', 'Ernie4_5_MoeForCausalLM', 'ExaoneForCausalLM', 'Exaone4ForCausalLM', 'FalconForCausalLM', 'Fairseq2LlamaForCausalLM', 'GemmaForCausalLM', 'Gemma2ForCausalLM', 'Gemma3ForCausalLM', 'Gemma3nForCausalLM', 'Qwen3NextForCausalLM', 'GlmForCausalLM', 'Glm4ForCausalLM', 'Glm4MoeForCausalLM', 'GptOssForCausalLM', 'GPT2LMHeadModel', 'GPTBigCodeForCausalLM', 'GPTJForCausalLM', 'GPTNeoXForCausalLM', 'GraniteForCausalLM', 'GraniteMoeForCausalLM', 'GraniteMoeHybridForCausalLM', 'GraniteMoeSharedForCausalLM', 'GritLM', 'Grok1ModelForCausalLM', 'HunYuanMoEV1ForCausalLM', 'HunYuanDenseV1ForCausalLM', 'HCXVisionForCausalLM', 'InternLMForCausalLM', 'InternLM2ForCausalLM', 'InternLM2VEForCausalLM', 'InternLM3ForCausalLM', 'JAISLMHeadModel', 'JambaForCausalLM', 'Lfm2ForCausalLM', 'LlamaForCausalLM', 'Llama4ForCausalLM', 'LLaMAForCausalLM', 'LongcatFlashForCausalLM', 'MambaForCausalLM', 'FalconMambaForCausalLM', 'FalconH1ForCausalLM', 'Mamba2ForCausalLM', 'MiniCPMForCausalLM', 'MiniCPM3ForCausalLM', 'MistralForCausalLM', 'MixtralForCausalLM', 'MotifForCausalLM', 'MptForCausalLM', 'MPTForCausalLM', 'MiMoForCausalLM', 'NemotronForCausalLM', 'NemotronHForCausalLM', 'OlmoForCausalLM', 'Olmo2ForCausalLM', 'Olmo3ForCausalLM', 'OlmoeForCausalLM', 'OPTForCausalLM', 
'OrionForCausalLM', 'PersimmonForCausalLM', 'PhiForCausalLM', 'Phi3ForCausalLM', 'PhiMoEForCausalLM', 'Plamo2ForCausalLM', 'QWenLMHeadModel', 'Qwen2ForCausalLM', 'Qwen2MoeForCausalLM', 'Qwen3ForCausalLM', 'Qwen3MoeForCausalLM', 'RWForCausalLM', 'SeedOssForCausalLM', 'Step3TextForCausalLM', 'StableLMEpochForCausalLM', 'StableLmForCausalLM', 'Starcoder2ForCausalLM', 'SolarForCausalLM', 'TeleChat2ForCausalLM', 'TeleFLMForCausalLM', 'XverseForCausalLM', 'Zamba2ForCausalLM', 'BertModel', 'Gemma2Model', 'Gemma3TextModel', 'GPT2ForSequenceClassification', 'GteModel', 'GteNewModel', 'InternLM2ForRewardModel', 'JambaForSequenceClassification', 'LlamaModel', 'MistralModel', 'ModernBertModel', 'NomicBertModel', 'Qwen2Model', 'Qwen2ForRewardModel', 'Qwen2ForProcessRewardModel', 'RobertaForMaskedLM', 'RobertaModel', 'XLMRobertaModel', 'LlavaNextForConditionalGeneration', 'Phi3VForCausalLM', 'Qwen2VLForConditionalGeneration', 'PrithviGeoSpatialMAE', 'Terratorch', 'BertForSequenceClassification', 'BertForTokenClassification', 'GteNewForSequenceClassification', 'ModernBertForSequenceClassification', 'RobertaForSequenceClassification', 'XLMRobertaForSequenceClassification', 'JinaVLForRanking', 'AriaForConditionalGeneration', 'AyaVisionForConditionalGeneration', 'Blip2ForConditionalGeneration', 'ChameleonForConditionalGeneration', 'Cohere2VisionForConditionalGeneration', 'DeepseekVLV2ForCausalLM', 'DotsOCRForCausalLM', 'Ernie4_5_VLMoeForConditionalGeneration', 'FuyuForCausalLM', 'Gemma3ForConditionalGeneration', 'Gemma3nForConditionalGeneration', 'GLM4VForCausalLM', 'Glm4vForConditionalGeneration', 'Glm4vMoeForConditionalGeneration', 'GraniteSpeechForConditionalGeneration', 'H2OVLChatModel', 'InternVLChatModel', 'NemotronH_Nano_VL_V2', 'InternS1ForConditionalGeneration', 'InternVLForConditionalGeneration', 'Idefics3ForConditionalGeneration', 'SmolVLMForConditionalGeneration', 'KeyeForConditionalGeneration', 'KeyeVL1_5ForConditionalGeneration', 'RForConditionalGeneration', 
'KimiVLForConditionalGeneration', 'Llama_Nemotron_Nano_VL', 'Llama4ForConditionalGeneration', 'LlavaForConditionalGeneration', 'LlavaNextVideoForConditionalGeneration', 'LlavaOnevisionForConditionalGeneration', 'MantisForConditionalGeneration', 'MiDashengLMModel', 'MiniMaxVL01ForConditionalGeneration', 'MiniCPMO', 'MiniCPMV', 'Mistral3ForConditionalGeneration', 'MolmoForCausalLM', 'NVLM_D', 'Ovis', 'Ovis2_5', 'PaliGemmaForConditionalGeneration', 'Phi4MMForCausalLM', 'Phi4MultimodalForCausalLM', 'PixtralForConditionalGeneration', 'QwenVLForConditionalGeneration', 'Qwen2_5_VLForConditionalGeneration', 'Qwen2AudioForConditionalGeneration', 'Qwen2_5OmniModel', 'Qwen2_5OmniForConditionalGeneration', 'Qwen3VLForConditionalGeneration', 'Qwen3VLMoeForConditionalGeneration', 'SkyworkR1VChatModel', 'Step3VLForConditionalGeneration', 'TarsierForConditionalGeneration', 'Tarsier2ForConditionalGeneration', 'UltravoxModel', 'VoxtralForConditionalGeneration', 'WhisperForConditionalGeneration', 'MiMoMTPModel', 'EagleLlamaForCausalLM', 'EagleLlama4ForCausalLM', 'EagleMiniCPMForCausalLM', 'Eagle3LlamaForCausalLM', 'LlamaForCausalLMEagle3', 'EagleDeepSeekMTPModel', 'DeepSeekMTPModel', 'ErnieMTPModel', 'LongCatFlashMTPModel', 'Glm4MoeMTPModel', 'MedusaModel', 'Qwen3NextMTP', 'SmolLM3ForCausalLM', 'Emu3ForConditionalGeneration', 'TransformersModel', 'TransformersForCausalLM', 'TransformersForMultimodalLM']) [type=value_error, input_value=ArgsKwargs((), {'model': ...rocessor_plugin': None}), input_type=ArgsKwargs]
For further information visit https://errors.pydantic.dev/2.12/v/value_error
How do you solve it?
.
It seems that the vLLM team released 0.16.0 without support for that model!
any solution?
uv pip install -U vllm \
--torch-backend=auto \
--extra-index-url https://wheels.vllm.ai/nightly # add variant subdirectory here if needed
Qwen3.5 is not yet supported in the stable (production) build of vLLM; for now you will have to install the nightly version of vLLM.
Then
uv pip install -U transformers
You will also need to upgrade transformers from 4.5.x to >=5.4.x.
Then you do
vllm serve <model_name> --port 8081 --host 0.0.0.0 --dtype auto --api-key custom_api_token --trust-remote-code