# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 PyThaiNLP Project
# SPDX-FileType: SOURCE
# SPDX-License-Identifier: Apache-2.0
import re
import torch
class WangChanGLM:
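    """
    WangChanGLM: a Thai instruction-following language model
    (https://huggingface.co/pythainlp/wangchanglm-7.5B-sft-en-sharded).
    """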
def __init__(self):
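        # Matches any run of characters outside the Thai Unicode range
        # (ก-๙); used to flag vocabulary tokens containing non-Thai text.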
self.exclude_pattern = re.compile(r'[^ก-๙]+')
self.stop_token = "\n"
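        # Prompt templates that wrap an instruction (and optional context)
        # in the <context>/<human>/<bot> chat format the model expects.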
self.PROMPT_DICT = {
"prompt_input": (
"<context>: {input}\n<human>: {instruction}\n<bot>: "
),
"prompt_no_input": (
"<human>: {instruction}\n<bot>: "
),
"prompt_chatbot": (
"<human>: {human}\n<bot>: {bot}"
),
}
def is_exclude(self, text:str)->bool:
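        """
        Check whether the text contains any character outside the Thai
        range (ก-๙).

        :param str text: token text from the tokenizer vocabulary
        :return: True if the text contains non-Thai characters
        :rtype: bool
        """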
return bool(self.exclude_pattern.search(text))
def load_model(
self,
model_path:str="pythainlp/wangchanglm-7.5B-sft-en-sharded",
return_dict:bool=True,
load_in_8bit:bool=False,
device:str="cuda",
torch_dtype=torch.float16,
offload_folder:str="./",
low_cpu_mem_usage:bool=True
):
"""
Load model
:param str model_path: model path
:param bool return_dict: return dict
:param bool load_in_8bit: load model in 8bit
:param str device: device (cpu, cuda or other)
:param torch_dtype torch_dtype: torch_dtype
:param str offload_folder: offload folder
:param bool low_cpu_mem_usage: low cpu mem usage
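
        :Example:
        ::

            from pythainlp.generate.wangchanglm import WangChanGLM
            import torch

            model = WangChanGLM()
            # A minimal sketch: load on CPU with bfloat16, mirroring the
            # instruct_generate example below; a CUDA setup would instead
            # pass device="cuda" and, e.g., torch_dtype=torch.float16.
            model.load_model(device="cpu", torch_dtype=torch.bfloat16)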
"""
import pandas as pd
from transformers import AutoModelForCausalLM, AutoTokenizer
self.device = device
self.torch_dtype = torch_dtype
self.model_path = model_path
self.model = AutoModelForCausalLM.from_pretrained(
self.model_path,
return_dict=return_dict,
load_in_8bit=load_in_8bit,
device_map=device,
torch_dtype=torch_dtype,
offload_folder=offload_folder,
low_cpu_mem_usage=low_cpu_mem_usage
)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
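        # Build a table of the tokenizer vocabulary and flag every token
        # that contains non-Thai characters; their ids are suppressed at
        # the start of generation when thai_only=True.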
self.df = pd.DataFrame(self.tokenizer.vocab.items(), columns=['text', 'idx'])
self.df['is_exclude'] = self.df.text.map(self.is_exclude)
        self.exclude_ids = self.df[self.df.is_exclude].idx.tolist()
def gen_instruct(
self,
text:str,
max_new_tokens:int=512,
top_p:float=0.95,
temperature:float=0.9,
top_k:int=50,
no_repeat_ngram_size:int=2,
typical_p:float=1.,
thai_only:bool=True,
skip_special_tokens:bool=True
):
"""
Generate Instruct
:param str text: text
:param int max_new_tokens: maximum number of new tokens
:param float top_p: top p
:param float temperature: temperature
:param int top_k: top k
:param int no_repeat_ngram_size: do not repeat ngram size
:param float typical_p: typical p
:param bool thai_only: Thai only
:param bool skip_special_tokens: skip special tokens
:return: the answer from Instruct
:rtype: str
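
        :Example:
        ::

            # A minimal sketch; assumes load_model() has already been called.
            # gen_instruct expects a prompt already wrapped in the chat
            # template, so format it with PROMPT_DICT first.
            prompt = model.PROMPT_DICT["prompt_no_input"].format_map(
                {"instruction": "ขอวิธีลดน้ำหนัก", "input": ""}
            )
            print(model.gen_instruct(prompt))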
"""
        # Move the tokenized inputs to the same device as the model so that
        # generation also works when the model is loaded on CUDA.
        batch = self.tokenizer(text, return_tensors="pt").to(self.model.device)
with torch.autocast(device_type=self.device, dtype=self.torch_dtype):
            if thai_only:
                output_tokens = self.model.generate(
                    input_ids=batch["input_ids"],
                    max_new_tokens=max_new_tokens,
                    # Suppress tokens containing non-Thai characters at the
                    # start of generation.
                    begin_suppress_tokens=self.exclude_ids,
                    no_repeat_ngram_size=no_repeat_ngram_size,
                    # Sampling settings follow OpenAssistant (oasst)
                    # defaults, e.g. top_k=50.
                    top_k=top_k,
                    top_p=top_p,
                    typical_p=typical_p,
                    temperature=temperature,
                )
            else:
                output_tokens = self.model.generate(
                    input_ids=batch["input_ids"],
                    max_new_tokens=max_new_tokens,
                    no_repeat_ngram_size=no_repeat_ngram_size,
                    # Sampling settings follow OpenAssistant (oasst)
                    # defaults, e.g. top_k=50.
                    top_k=top_k,
                    top_p=top_p,
                    typical_p=typical_p,
                    temperature=temperature,
                )
        return self.tokenizer.decode(
            output_tokens[0][len(batch["input_ids"][0]):],
            skip_special_tokens=skip_special_tokens,
        )
    def instruct_generate(
        self,
        instruct: str,
        context: str = None,
        max_new_tokens: int = 512,
        temperature: float = 0.9,
        top_p: float = 0.95,
        top_k: int = 50,
        no_repeat_ngram_size: int = 2,
        typical_p: float = 1.0,
        thai_only: bool = True,
        skip_special_tokens: bool = True
    ):
"""
Generate Instruct
:param str instruct: Instruct
:param str context: context
:param int max_new_tokens: maximum number of new tokens
:param float top_p: top p
:param float temperature: temperature
:param int top_k: top k
:param int no_repeat_ngram_size: do not repeat ngram size
:param float typical_p: typical p
:param bool thai_only: Thai only
:param bool skip_special_tokens: skip special tokens
:return: the answer from Instruct
:rtype: str
:Example:
::
from pythainlp.generate.wangchanglm import WangChanGLM
import torch
model = WangChanGLM()
            model.load_model(device="cpu", torch_dtype=torch.bfloat16)
print(model.instruct_generate(instruct="ขอวิธีลดน้ำหนัก"))
# output: ลดน้ําหนักให้ได้ผล ต้องทําอย่างค่อยเป็นค่อยไป
# ปรับเปลี่ยนพฤติกรรมการกินอาหาร
# ออกกําลังกายอย่างสม่ําเสมอ
# และพักผ่อนให้เพียงพอ
# ที่สําคัญควรหลีกเลี่ยงอาหารที่มีแคลอรี่สูง
# เช่น อาหารทอด อาหารมัน อาหารที่มีน้ําตาลสูง
# และเครื่องดื่มแอลกอฮอล์
"""
if context in (None, ""):
prompt = self.PROMPT_DICT['prompt_no_input'].format_map(
{'instruction': instruct, 'input': ''}
)
else:
prompt = self.PROMPT_DICT['prompt_input'].format_map(
{'instruction': instruct, 'input': context}
)
result = self.gen_instruct(
prompt,
max_new_tokens=max_new_tokens,
top_p=top_p,
top_k=top_k,
temperature=temperature,
no_repeat_ngram_size=no_repeat_ngram_size,
typical_p=typical_p,
thai_only=thai_only,
skip_special_tokens=skip_special_tokens
)
return result