File size: 4,024 Bytes
e4b9a7b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 PyThaiNLP Project
# SPDX-FileType: SOURCE
# SPDX-License-Identifier: Apache-2.0
import re
from typing import List

from pythainlp import (
    thai_above_vowels,
    thai_below_vowels,
    thai_consonants,
    thai_follow_vowels,
    thai_lead_vowels,
    thai_letters,
    thai_tonemarks,
)
from pythainlp.tokenize import Tokenizer, subword_tokenize

# Circumfix vowel-frame templates.  "-" and "–" mark the consonant slot;
# ":" (in _r2) marks a tone-mark slot.
_r1 = ["เ-ย", "เ-ะ", "แ-ะ", "โ-ะ", "เ-าะ", "เ-อะ", "เ-อ", "เ-า"]
_r2 = ["–ั:วะ", "เ–ี:ยะ", "เ–ือะ", "–ั:ว", "เ–ี:ย", "เ–ื:อ", "–ื:อ"]

# Spoken name of each Thai tone mark (e.g. mai ek -> "ไม้เอก").
tonemarks = {
    mark: "ไม้" + name
    for mark, name in zip(thai_tonemarks, ["เอก", "โท", "ตรี", "จัตวา"])
}

# Regex forms of the templates above.
# NOTE(review): "(thai_tonemarks)?" below is a *literal* token, not an
# interpolated character class — it looks like f"[{thai_tonemarks}]?" may
# have been intended.  _clean() replaces this exact literal string, so any
# fix must change both places together; confirm before touching.
rule1 = [
    frame.replace("-", f"([{thai_letters}](thai_tonemarks)?)") for frame in _r1
]
# rule2 drops the tone-mark slot; rule3 turns it into a capture group.
rule2 = [
    frame.replace("–", f"([{thai_letters}])").replace(":", "") for frame in _r2
]
rule3 = [
    frame.replace("–", f"([{thai_letters}])").replace(":", f"([{thai_tonemarks}])")
    for frame in _r2
]

# Each template with the placeholder consonant "อ" filled in, mapped to
# itself (the original computed key and value identically).
dict_vowel_ex = {
    spelled: spelled
    for spelled in (
        frame.replace("-", "อ").replace("–", "อ").replace(":", "")
        for frame in _r1 + _r2
    )
}
# dict_vowel starts with the same frame entries, then gains the plain
# single-character vowels.
dict_vowel = dict(dict_vowel_ex)
for vowel in thai_lead_vowels:
    dict_vowel[vowel] = vowel + "อ"  # lead vowels are spelled before "อ"
for vowel in thai_follow_vowels + thai_above_vowels + thai_below_vowels:
    dict_vowel[vowel] = "อ" + vowel  # all others are spelled after "อ"

# Maximal-matching tokenizer over the vowel frames plus bare consonants.
_cut = Tokenizer(list(dict_vowel) + list(thai_consonants), engine="mm")


def _clean(w: str) -> str:
    """Normalize a syllable so its circumfix vowel frame becomes tokenizable.

    If *w* matches one of the vowel patterns in ``rule1``-``rule3``, the
    captured consonant (and tone mark, for ``rule3``) is pulled out and the
    vowel frame is re-emitted filled with the placeholder consonant "อ", so
    the ``_cut`` tokenizer can recognize the frame as one token.
    Unmatched input is returned unchanged.
    """
    # rule3: frames that capture both a consonant and a tone mark.
    if bool(re.match("|".join(rule3), w)):
        for r in rule3:
            if bool(re.match(r, w)):
                # Mark the captured consonant (\1) and tone mark (\2) with
                # "==" separators so they can be split apart below.
                w = re.sub(r, "\\1==\\2==", w)
                temp = w.split("==")
                # Rebuild as: consonant + "อ"-filled frame + tone mark.
                # NOTE(review): temp[2] (any text after the second "==") is
                # dropped here — presumably w is a single full syllable so
                # it is always empty; confirm with callers.
                w = (
                    temp[0]
                    + r.replace(f"([{thai_letters}])", "อ").replace(
                        f"([{thai_tonemarks}])", ""
                    )
                    + temp[1]
                )
    # rule2: frames that capture only a consonant (no tone-mark slot).
    elif bool(re.match("|".join(rule2), w)):
        for r in rule2:
            if bool(re.match(r, w)):
                # Reduce w to the captured consonant, then append the frame.
                w = re.sub(r, "\\1", w) + r.replace(f"([{thai_letters}])", "อ")
    # rule1: simple circumfix frames built from _r1.
    elif bool(re.match("|".join(rule1), w)):
        for r in rule1:
            if bool(re.match(r, w)):
                # The replace target contains the literal "(thai_tonemarks)?"
                # token — it must mirror rule1's construction exactly.
                w = re.sub(r, "\\1", w) + r.replace(
                    f"([{thai_letters}](thai_tonemarks)?)", "อ"
                )
    return w


def spell_syllable(text: str) -> List[str]:
    """
    Spell out syllables in Thai word distribution form.

    :param str text: Thai syllable only
    :return: List of spelled out parts — consonants (each followed by "อ"),
        vowels (from ``dict_vowel``), tone-mark names, then the original
        syllable itself
    :rtype: List[str]

    :Example:
    ::

        from pythainlp.util.spell_words import spell_syllable

        print(spell_syllable("แมว"))
        # output: ['มอ', 'วอ', 'แอ', 'แมว']
    """
    tokens = _cut.word_tokenize(_clean(text))

    # Build the consonant membership set once; the original rebuilt
    # set(thai_consonants) / set(dict_vowel) / set(tonemarks.keys()) for
    # every token inside the comprehension conditions.
    consonants = set(thai_consonants)

    c_only = [tok + "อ" for tok in tokens if tok in consonants]
    v_only = [dict_vowel[tok] for tok in tokens if tok in dict_vowel]
    t_only = [tonemarks[tok] for tok in tokens if tok in tonemarks]

    return c_only + v_only + t_only + [text]


def spell_word(text: str) -> List[str]:
    """
    Spell out words in Thai word distribution form.

    :param str text: Thai words only
    :return: List of spelled out words
    :rtype: List[str]

    :Example:
    ::

        from pythainlp.util.spell_words import spell_word

        print(spell_word("คนดี"))
        # output: ['คอ', 'นอ', 'คน', 'ดอ', 'อี', 'ดี', 'คนดี']
    """
    syllables = subword_tokenize(text, engine="han_solo")

    # Spell each syllable in turn, accumulating all parts in order.
    result: List[str] = []
    for syllable in syllables:
        result += spell_syllable(syllable)

    # Multi-syllable words end with the whole word itself.
    if len(syllables) > 1:
        result.append(text)

    return result