# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 PyThaiNLP Project
# SPDX-FileType: SOURCE
# SPDX-License-Identifier: Apache-2.0
import torch


class ChatBotModel:
    def __init__(self):
        """
        Chat using AI text generation.

        Call :meth:`load_model` before calling :meth:`chat`.
        """
        self.model = None
        self.history = []

    def reset_chat(self):
        """
        Reset the chat by clearing the conversation history
        """
        self.history = []

    def load_model(
        self,
        model_name: str = "wangchanglm",
        return_dict: bool = True,
        load_in_8bit: bool = False,
        device: str = "cuda",
        torch_dtype=torch.float16,
        offload_folder: str = "./",
        low_cpu_mem_usage: bool = True,
    ):
        """
        Load the chatbot model.

        :param str model_name: model name (currently, only "wangchanglm" is supported)
        :param bool return_dict: if True, the underlying model returns outputs as a dict
        :param bool load_in_8bit: load the model weights with 8-bit quantization to save memory
        :param str device: device to run the model on ("cpu", "cuda", or another torch device)
        :param torch_dtype torch_dtype: torch data type for the model weights (e.g. torch.float16)
        :param str offload_folder: folder for offloading model weights that do not fit in memory
        :param bool low_cpu_mem_usage: reduce CPU memory usage while loading the model
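
        :Example:
        ::

                from pythainlp.chat import ChatBotModel
                import torch

                chatbot = ChatBotModel()
                # A minimal sketch: the first call downloads the sharded
                # WangChanGLM weights from the Hugging Face Hub.
                chatbot.load_model(device="cpu", torch_dtype=torch.bfloat16)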
        """
        if model_name == "wangchanglm":
            from pythainlp.generate.wangchanglm import WangChanGLM

            self.model = WangChanGLM()
            self.model.load_model(
                model_path="pythainlp/wangchanglm-7.5B-sft-en-sharded",
                return_dict=return_dict,
                load_in_8bit=load_in_8bit,
                offload_folder=offload_folder,
                device=device,
                torch_dtype=torch_dtype,
                low_cpu_mem_usage=low_cpu_mem_usage,
            )
        else:
            raise NotImplementedError(f"We do not support {model_name}.")

    def chat(self, text: str) -> str:
        """
        Send a message to the chatbot and get its reply.

        The whole conversation history is packed into the prompt, so
        replies can refer back to earlier turns.

        :param str text: message to send to the chatbot
        :return: the chatbot's reply
        :rtype: str
        :Example:
        ::

                from pythainlp.chat import ChatBotModel
                import torch

                chatbot = ChatBotModel()
                chatbot.load_model(device="cpu", torch_dtype=torch.bfloat16)

                print(chatbot.chat("สวัสดี"))  # "Hello"
                # output: ยินดีที่ได้รู้จัก ("Nice to meet you")

                print(chatbot.history)
                # output: [('สวัสดี', 'ยินดีที่ได้รู้จัก')]
        """
        _temp = ""
        if self.history:
            for h, b in self.history:
                _temp += (
                    self.model.PROMPT_DICT["prompt_chatbot"].format_map(
                        {"human": h, "bot": b}
                    )
                    + self.model.stop_token
                )
        _temp += self.model.PROMPT_DICT["prompt_chatbot"].format_map(
            {"human": text, "bot": ""}
        )
        _bot = self.model.gen_instruct(_temp)
        self.history.append((text, _bot))
        return _bot
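

# A minimal usage sketch (not part of the public API): assumes the
# "pythainlp/wangchanglm-7.5B-sft-en-sharded" weights can be fetched from
# the Hugging Face Hub, and runs on CPU with bfloat16 as in the docstring
# examples above.
if __name__ == "__main__":
    bot = ChatBotModel()
    bot.load_model(device="cpu", torch_dtype=torch.bfloat16)

    # Each turn is appended to bot.history, so follow-up questions are
    # answered with the earlier turns included in the prompt.
    print(bot.chat("สวัสดี"))  # "Hello"
    print(bot.chat("คุณชื่ออะไร"))  # "What is your name?"
    print(bot.history)

    # Clear the history to start an unrelated conversation.
    bot.reset_chat()
    print(bot.history)  # []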