Colab Demo Not Working

#1
by haseebsultankhan - opened

The Colab demo is not working at all.

Error:


```
AttributeError                            Traceback (most recent call last)
/tmp/ipykernel_4324/287936975.py in <cell line: 0>()
      5 # 1. Load Model and Processor
      6 model_name = "oddadmix/Qaari-0.1-Urdu-OCR-VL-2B-Instruct"
----> 7 model = Qwen2VLForConditionalGeneration.from_pretrained(
      8     model_name,
      9     torch_dtype="auto",

/usr/local/lib/python3.12/dist-packages/transformers/modeling_utils.py in from_pretrained(cls, pretrained_model_name_or_path, config, cache_dir, ignore_mismatched_sizes, force_download, local_files_only, token, revision, use_safetensors, weights_only, *model_args, **kwargs)
   4166             if token is not None:
   4167                 adapter_kwargs["token"] = token
-> 4168             loading_info = model.load_adapter(
   4169                 _adapter_model_path,
   4170                 adapter_name=adapter_name,

/usr/local/lib/python3.12/dist-packages/transformers/integrations/peft.py in load_adapter(self, peft_model_id, adapter_name, peft_config, adapter_state_dict, low_cpu_mem_usage, is_trainable, hotswap, local_files_only, adapter_kwargs, load_config, **kwargs)
    532         if not hotswap:
    533             # Create and add fresh new adapters into the model, unless the weights are hotswapped
--> 534             inject_adapter_in_model(peft_config, self, adapter_name)
    535
    536         if not self._hf_peft_config_loaded:

/usr/local/lib/python3.12/dist-packages/peft/mapping.py in inject_adapter_in_model(peft_config, model, adapter_name, low_cpu_mem_usage, state_dict)
     86
     87     # By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules.
---> 88     peft_model = tuner_cls(
     89         model, peft_config, adapter_name=adapter_name, low_cpu_mem_usage=low_cpu_mem_usage, state_dict=state_dict
     90     )

/usr/local/lib/python3.12/dist-packages/peft/tuners/tuners_utils.py in __init__(self, model, peft_config, adapter_name, low_cpu_mem_usage, state_dict)
    296             self._pre_injection_hook(self.model, self.peft_config[adapter_name], adapter_name)
    297         if peft_config != PeftType.XLORA or peft_config[adapter_name] != PeftType.XLORA:
--> 298             self.inject_adapter(self.model, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage, state_dict=state_dict)
    299
    300         # Copy the peft_config in the injected model.

/usr/local/lib/python3.12/dist-packages/peft/tuners/tuners_utils.py in inject_adapter(self, model, adapter_name, autocast_adapter_dtype, low_cpu_mem_usage, state_dict)
    802             ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
    803             with ctx():
--> 804                 self._create_and_replace(
    805                     peft_config, adapter_name, target, target_name, parent, current_key=key
    806                 )

/usr/local/lib/python3.12/dist-packages/peft/tuners/lora/model.py in _create_and_replace(self, lora_config, adapter_name, target, target_name, parent, current_key, parameter_name)
    248         )
    249         device_map = self.model.hf_device_map if hasattr(self.model, "hf_device_map") else None
--> 250         new_module = self._create_new_module(lora_config, adapter_name, target, device_map=device_map, **kwargs)
    251         if adapter_name not in self.active_adapters:
    252             # adding an additional adapter: it is not automatically trainable

/usr/local/lib/python3.12/dist-packages/peft/tuners/lora/model.py in _create_new_module(lora_config, adapter_name, target, **kwargs)
    335         new_module = None
    336         for dispatcher in dispatchers:
--> 337             new_module = dispatcher(target, adapter_name, lora_config=lora_config, **kwargs)
    338             if new_module is not None:  # first match wins
    339                 break

/usr/local/lib/python3.12/dist-packages/peft/tuners/lora/bnb.py in dispatch_bnb_4bit(target, adapter_name, **kwargs)
    603             {
    604                 "compute_dtype": target_base_layer.compute_dtype,
--> 605                 "compress_statistics": target_base_layer.weight.compress_statistics,
    606                 "quant_type": target_base_layer.weight.quant_type,
    607             }

AttributeError: 'Parameter' object has no attribute 'compress_statistics'
```
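
From the traceback, the failure is not in the demo cell itself: `from_pretrained` finds a LoRA adapter inside the repo and auto-loads it through PEFT, and PEFT's 4-bit dispatcher then reads `target_base_layer.weight.compress_statistics`. That attribute only exists on bitsandbytes `Params4bit` weights, but the weight here is a plain `torch.nn.Parameter`, which usually points to a version mismatch between `transformers`, `peft`, and `bitsandbytes` in the current Colab image. Below is a minimal sketch of two things worth trying, not a confirmed fix; the `BitsAndBytesConfig` values are my assumptions, not read from the repo:

```python
# Worth trying first, in a fresh Colab runtime: aligning the quantization
# stack, since the crash happens inside peft's bitsandbytes integration.
#   !pip install -U transformers peft bitsandbytes accelerate
#
# If that alone doesn't help, loading the model explicitly 4-bit quantized
# means every linear weight PEFT inspects is a bitsandbytes Params4bit
# (which does have .compress_statistics / .quant_type) rather than a plain
# torch.nn.Parameter. NOTE: the config values below are assumptions, not
# taken from the model repo.
import torch
from transformers import (
    AutoProcessor,
    BitsAndBytesConfig,
    Qwen2VLForConditionalGeneration,
)

model_name = "oddadmix/Qaari-0.1-Urdu-OCR-VL-2B-Instruct"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",              # assumed; common QLoRA default
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumed compute dtype
)

model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_name)
```

If the explicit 4-bit load still trips the same error, the problem is almost certainly the installed library versions rather than the notebook code, so pinning `peft` and `bitsandbytes` to the versions the model was exported with would be the next thing to check.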

I will try to take a look
