import torch
from transformers import AutoTokenizer

from modeling import Hanuman
def run(prompt: str = "สวัสดี") -> None:
    """Generate and print a sampled continuation of *prompt* with the local model.

    Loads the tokenizer and the Hanuman model from the current directory,
    samples up to 50 new tokens, and prints the decoded text with special
    tokens stripped.

    Args:
        prompt: Input text to continue (defaults to a Thai greeting).
    """
    tokenizer = AutoTokenizer.from_pretrained('.')
    # NOTE(review): map_location is a torch.load keyword, not a standard
    # from_pretrained one — presumably the custom Hanuman.from_pretrained
    # forwards it; confirm against modeling.Hanuman.
    model = Hanuman.from_pretrained('.', map_location='cpu')
    model.eval()  # disable dropout/batch-norm updates for inference
    inputs = tokenizer(prompt, return_tensors='pt')
    # do_sample=True is required under the HF generation API: without it,
    # decoding is greedy and temperature/top_k/top_p are silently ignored.
    with torch.no_grad():  # no gradients needed for generation
        out = model.generate(
            inputs['input_ids'],
            max_new_tokens=50,
            do_sample=True,
            temperature=1.2,
            top_k=50,
            top_p=0.95,
        )
    print(tokenizer.decode(out[0], skip_special_tokens=True))
if __name__ == "__main__":
    # Script entry point: greet in Thai and ask for a short description of Thailand.
    run("สวัสดีครับ ช่วยอธิบายประเทศไทยสั้น ๆ")