# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 PyThaiNLP Project
# SPDX-FileType: SOURCE
# SPDX-License-Identifier: Apache-2.0
"""
Multi cut -- Thai word segmentation with maximum matching.
Original codes from Korakot Chaovavanich.
:See Also:
* `Facebook post \
<https://www.facebook.com/groups/408004796247683/permalink/431283740586455/>`_
* `GitHub Gist \
<https://gist.github.com/korakot/fe26c65dc9eed467f4497f784a805716>`_
"""
import re
from collections import defaultdict
from typing import Iterator, List
from pythainlp.tokenize import DEFAULT_WORD_DICT_TRIE
from pythainlp.util import Trie
class LatticeString(str):
    """A ``str`` that also carries its possible tokenizations.

    Attributes:
        multi: list of "/"-delimited tokenization variants of the value
        unique: True when at most one tokenization variant exists
        in_dict: True when the value was found in the dictionary
    """

    def __new__(cls, value, multi=None, in_dict=True):
        # str is immutable; the text itself must be fixed in __new__.
        return str.__new__(cls, value)

    def __init__(self, value, multi=None, in_dict=True):
        # A falsy ``multi`` means the value is its only tokenization.
        variants = list(multi) if multi else [value]
        self.multi = variants
        self.unique = len(variants) < 2
        self.in_dict = in_dict  # if in dictionary
# Pattern for runs of non-Thai text (Latin words, numbers, spaces,
# newlines).  Uses verbose mode ``(?x)``, so the whitespace and ``#``
# annotations inside the pattern string are ignored when matching.
_RE_NONTHAI = r"""(?x)
[-a-zA-Z]+| # Latin characters
\d+([,\.]\d+)*| # numbers
[ \t]+| # spaces
\r?\n # newlines
"""
# Compiled once at import time; used by _multicut for every unmatched span.
_PAT_NONTHAI = re.compile(_RE_NONTHAI)
def _multicut(
    text: str, custom_dict: Trie = DEFAULT_WORD_DICT_TRIE
) -> Iterator[LatticeString]:
    """Yield chunks of *text* as :class:`LatticeString` objects.

    Builds a word lattice over *text* using ``custom_dict`` (maximum
    matching) and yields one LatticeString per span where all
    tokenization paths converge; each carries its "/"-delimited
    tokenization variants.  Spans with no dictionary match are yielded
    with ``in_dict=False``.
    """
    if not custom_dict:
        custom_dict = DEFAULT_WORD_DICT_TRIE
    len_text = len(text)
    words_at = defaultdict(list)  # position -> dictionary words starting there
    def serialize(p, p2):  # enumerate "/"-joined word paths from p to p2
        for w in words_at[p]:
            p_ = p + len(w)
            if p_ == p2:
                yield w
            elif p_ < p2:
                for path in serialize(p_, p2):
                    yield w + "/" + path
    q = {0}  # set of open lattice end positions still to expand
    last_p = 0  # last position for yield
    while min(q) < len_text:
        p = min(q)  # always expand the leftmost open position
        q -= {p}  # q.pop, but for set
        for w in custom_dict.prefixes(text[p:]):
            words_at[p].append(w)
            q.add(p + len(w))
        len_q = len(q)
        if len_q == 1:
            # single open end: every path converges there, so the span
            # from last_p is unambiguous as a unit and can be flushed
            q0 = min(q)
            yield LatticeString(text[last_p:q0], serialize(last_p, q0))
            last_p = q0
        elif len_q == 0:  # no dictionary word starts at p
            m = _PAT_NONTHAI.match(text[p:])
            if m:  # non-Thai token: consume the whole match
                i = p + m.span()[1]
            else:  # unknown span: skip forward to the next matchable position
                for i in range(p, len_text):
                    ww = custom_dict.prefixes(text[i:])
                    m = _PAT_NONTHAI.match(text[i:])
                    if ww or m:
                        break
                else:  # reached end of text without any match
                    i = len_text
            w = text[p:i]
            words_at[p].append(w)
            yield LatticeString(w, in_dict=False)
            last_p = i
            q.add(i)
def mmcut(text: str) -> List[str]:
    """Segment *text*, picking the variant with the fewest tokens.

    For each lattice chunk, the tokenization with the fewest "/"
    separators (i.e. fewest words) is chosen.
    """
    tokens: List[str] = []
    for lattice in _multicut(text):
        fewest = min(lattice.multi, key=lambda path: path.count("/"))
        tokens.extend(fewest.split("/"))
    return tokens
def _combine(ww: List[LatticeString]) -> Iterator[str]:
if ww == []:
yield ""
else:
w = ww[0]
for tail in _combine(ww[1:]):
if w.unique:
yield w + "|" + tail
else:
for m in w.multi:
yield m.replace("/", "|") + "|" + tail
def segment(
    text: str, custom_dict: Trie = DEFAULT_WORD_DICT_TRIE
) -> List[str]:
    """Dictionary-based maximum matching word segmentation.

    :param text: text to be tokenized
    :type text: str
    :param custom_dict: tokenization dictionary,\
        defaults to DEFAULT_WORD_DICT_TRIE
    :type custom_dict: Trie, optional
    :return: list of segmented tokens
    :rtype: List[str]
    """
    # Non-string or empty input yields no tokens.
    if isinstance(text, str) and text:
        return list(_multicut(text, custom_dict=custom_dict))
    return []
def find_all_segment(
    text: str, custom_dict: Trie = DEFAULT_WORD_DICT_TRIE
) -> List[str]:
    """Get all possible segment variations.

    :param text: input string to be tokenized
    :type text: str
    :param custom_dict: tokenization dictionary,\
        defaults to DEFAULT_WORD_DICT_TRIE
    :type custom_dict: Trie, optional
    :return: list of segment variations
    :rtype: List[str]
    """
    # Guard: non-string or empty input has no variations.
    if not isinstance(text, str) or not text:
        return []
    chunks = list(_multicut(text, custom_dict=custom_dict))
    return list(_combine(chunks))