quant_stage:
  quant_modifiers:
    QuantizationModifier:
      targets: [Linear]
      ignore: []
      kv_cache_scheme:
        num_bits: 8
        type: float
        symmetric: true
        group_size: null
        strategy: tensor
        block_structure: null
        dynamic: false
        actorder: null
        scale_dtype: null
        zp_dtype: null
        observer: memoryless_minmax
        observer_kwargs: {}
        # NOTE(review): nesting level inferred from the flattened extraction —
        # confirm whether this belongs under kv_cache_scheme or the modifier.
        bypass_divisibility_checks: false