import torch
from datasets import load_dataset
from transformers import AutoTokenizer
from auto_round import AutoRound
# Quantize the checkpoint in the current directory; write the INT8
# AutoRound artifacts to a sibling output directory.
model_name_or_path = "."
output_dir = "./Qwen3.6-27B-INT8-autoround"
# trust_remote_code: the checkpoint ships custom tokenizer code — TODO confirm
# the local "." checkpoint is from a trusted source before loading.
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
# Layer-name fragments to exclude from quantization: any layer whose name
# matches one of these keywords is kept at 16 bits via the per-layer
# override below (embeddings, attention variants, vision tower, MTP head,
# and the output projection).
ignore_keywords = [
    "embed_tokens",
    "linear_attn",
    # "self_attn",
    "visual",
    "mtp",
    "lm_head",
]
# One fresh {"bits": 16} override per keyword (dicts must not be shared,
# in case AutoRound mutates a per-layer entry in place).
layer_config = {keyword: {"bits": 16} for keyword in ignore_keywords}
# Build the calibration set from pile-10k: tokenize each sample, keep only
# those that fill the entire context window, and stop once enough are
# collected.
hf_dataset = load_dataset("NeelNanda/pile-10k", split="train")
seqlen = 2048
max_samples = 512
tokens_list = []
for sample in hf_dataset:
    encoded = tokenizer(
        sample["text"],
        truncation=True,
        max_length=seqlen,
        return_tensors="pt",
    )
    ids = encoded["input_ids"]
    # Truncation caps the length at seqlen, so ">=" effectively selects
    # only samples that reach the full context length.
    if ids.shape[-1] >= seqlen:
        tokens_list.append(ids)
        if len(tokens_list) >= max_samples:
            break
# AutoRound configuration: W8A16 symmetric weight quantization,
# group_size=-1, calibrated on the pre-tokenized samples built above and
# spread over devices "0,1".
autoround_kwargs = dict(
    model=model_name_or_path,
    tokenizer=tokenizer,
    scheme="W8A16",
    enable_torch_compile=True,
    group_size=-1,
    sym=True,
    layer_config=layer_config,
    dataset=tokens_list,
    device_map="0,1",
    batch_size=8,
    seqlen=seqlen,
    iters=1000,
)
ar = AutoRound(**autoround_kwargs)
# Quantize and export in AutoRound's native serialization format.
ar.quantize_and_save(output_dir, format="auto_round")