File size: 1,966 Bytes
e4b9a7b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
from transformers import M2M100ForConditionalGeneration

from .tokenization_small100 import SMALL100Tokenizer


class Small100Translator:
    """
    Machine Translation using the small100 model.

    small100 is a compact multilingual machine-translation model covering
    100+ languages.

    - Huggingface https://huggingface.co/alirezamsh/small100

    :param bool use_gpu: load model using GPU (Default is False)
    :param str pretrained: Hugging Face model name or local path
        (Default is "alirezamsh/small100")
    """

    def __init__(
        self,
        use_gpu: bool = False,
        pretrained: str = "alirezamsh/small100",
    ) -> None:
        self.pretrained = pretrained
        self.model = M2M100ForConditionalGeneration.from_pretrained(self.pretrained)
        # The tokenizer is loaded lazily in translate() because
        # SMALL100Tokenizer must be constructed with the target language.
        self.tokenizer = None
        self.tgt_lang = None
        if use_gpu:
            self.model = self.model.cuda()

    def translate(self, text: str, tgt_lang: str = "en") -> str:
        """
        Translate text from X to X

        :param str text: input text in source language
        :param str tgt_lang: target language
        :return: translated text in target language
        :rtype: str

        :Example:

        ::

            from pythainlp.translate.small100 import Small100Translator

            mt = Small100Translator()

            # Translate text from Thai to English
            mt.translate("ทดสอบระบบ", tgt_lang="en")
            # output: 'Testing system'

            # Translate text from Thai to Chinese
            mt.translate("ทดสอบระบบ", tgt_lang="zh")
            # output: '系统测试'

            # Translate text from Thai to French
            mt.translate("ทดสอบระบบ", tgt_lang="fr")
            # output: 'Test du système'

        """
        # Reload the tokenizer only when the target language changes:
        # SMALL100Tokenizer bakes the target language into its tokenization.
        if tgt_lang != self.tgt_lang:
            self.tokenizer = SMALL100Tokenizer.from_pretrained(
                self.pretrained, tgt_lang=tgt_lang
            )
            self.tgt_lang = tgt_lang
        # Move input tensors to the model's device; without this, generation
        # fails with a CPU/CUDA device mismatch when use_gpu=True was passed
        # to __init__ (the model is on CUDA but the inputs stay on CPU).
        inputs = self.tokenizer(text, return_tensors="pt").to(self.model.device)
        # Kept as an instance attribute for backward compatibility with any
        # caller that inspects `self.translated` after a call.
        self.translated = self.model.generate(**inputs)
        return self.tokenizer.batch_decode(
            self.translated, skip_special_tokens=True
        )[0]