---
# AWQ (Activation-aware Weight Quantization) recipe, llm-compressor style.
# Reconstructed from a line-collapsed serialization; key nesting follows the
# compressed-tensors QuantizationScheme / QuantizationArgs schema.
default_stage:
  default_modifiers:
    AWQModifier:
      # Quantization scheme applied to the targeted Linear modules:
      # int4, symmetric, group-wise (group_size 32), MSE-based observer.
      config_groups:
        group_0:
          targets: [Linear]
          weights:
            num_bits: 4
            type: int
            symmetric: true
            group_size: 32
            strategy: group
            block_structure: null
            dynamic: false
            actorder: null
            scale_dtype: null
            zp_dtype: null
            observer: mse
            observer_kwargs: {}
          # Activations left unquantized (weight-only quantization).
          input_activations: null
          output_activations: null
          format: null
      targets: [Linear]
      # Modules excluded from quantization — plain module names or
      # 're:' regex patterns ('[.]' escapes the dot inside the regex).
      ignore:
        - model.embed_tokens
        - 're:.*block_sparse_moe[.]e_score_correction_bias$'
        - 're:.*block_sparse_moe[.]gate$'
        - lm_head
      bypass_divisibility_checks: false
      # AWQ smoothing mappings: each entry pairs a smooth_layer with the
      # balance_layers its scales are balanced against.
      mappings:
        - smooth_layer: 're:.*input_layernorm$'
          balance_layers: ['re:.*q_proj$', 're:.*k_proj$', 're:.*v_proj$']
          activation_hook_target: null
          balance_exponent: 1
        - smooth_layer: 're:.*post_attention_layernorm$'
          balance_layers: ['re:.*block_sparse_moe[.]gate', 're:.*w1$', 're:.*w3$']
          activation_hook_target: null
          balance_exponent: 1
        - smooth_layer: 're:.*w3$'
          balance_layers: ['re:.*w2$']
          activation_hook_target: null
          balance_exponent: 1
      # NOTE(review): python-specific tag — requires an unsafe loader
      # (yaml.load with FullLoader/UnsafeLoader) and breaks yaml.safe_load.
      # The consumer may accept a plain string 'cpu' instead — confirm with
      # the AWQModifier schema before simplifying.
      offload_device: !!python/object/apply:torch.device [cpu]
      duo_scaling: true
      n_grid: 20