# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 PyThaiNLP Project
# SPDX-FileType: SOURCE
# SPDX-License-Identifier: Apache-2.0
"""
Tool for creating word lists
codes are from Korakot Chaovavanich.
:See also:
* `Facebook post \
<https://www.facebook.com/groups/colab.thailand/permalink/1667821073393244>`_
* `Google Colab \
<https://colab.research.google.com/drive/19kY2jCHONuxmTJM0U8PIE_I5OK1rO-x_>`_
"""
from collections import Counter
from typing import Callable, Iterable, Iterator, List, Set, Tuple
from pythainlp.corpus import thai_words
from pythainlp.tokenize import newmm
from pythainlp.util import Trie
def index_pairs(words: List[str]) -> Iterator[Tuple[int, int]]:
    """
    Yield (start, end) character offsets of each word, in order.

    Offsets are relative to the concatenation of all words, so each
    pair marks where the word begins and ends in the joined text.
    """
    start = 0
    for word in words:
        end = start + len(word)
        yield start, end
        start = end
def find_badwords(
    tokenize: Callable[[str], List[str]],
    training_data: Iterable[Iterable[str]],
) -> Set[str]:
    """
    Find words that do not work well with the `tokenize` function
    for the provided `training_data`.

    :param Callable[[str], List[str]] tokenize: a tokenize function
    :param Iterable[Iterable[str]] training_data: tokenized text, to be used\
        as a training set
    :return: words that are considered to make `tokenize` perform badly
    :rtype: Set[str]
    """
    right = Counter()  # count of a word tokenized at correct boundaries
    wrong = Counter()  # count of a word tokenized at incorrect boundaries
    for train_words in training_data:
        # Materialize once: the inner iterable may be a one-shot iterator,
        # and it is consumed twice below (index_pairs and "".join).
        train_words = list(train_words)
        train_set = set(index_pairs(train_words))
        test_words = tokenize("".join(train_words))
        for w, p in zip(test_words, index_pairs(test_words)):
            if p in train_set:
                right[w] += 1
            else:
                wrong[w] += 1
    # A word is "bad" when it appears at wrong boundaries more often
    # than at correct ones.
    return {w for w, count in wrong.items() if count > right[w]}
def revise_wordset(
    tokenize: Callable[[str], List[str]],
    orig_words: Iterable[str],
    training_data: Iterable[Iterable[str]],
) -> Set[str]:
    """
    Revise a set of words that could improve tokenization performance of
    a dictionary-based `tokenize` function.

    `orig_words` will be used as a base set for the dictionary.
    Words that do not perform well with `training_data` will be removed.
    The remaining words will be returned.

    :param Callable[[str], List[str]] tokenize: a tokenize function, can be\
        any function that takes a string as input and returns a List[str]
    :param Iterable[str] orig_words: words that used by the tokenize function,\
        will be used as a base for revision
    :param Iterable[Iterable[str]] training_data: tokenized text, to be used\
        as a training set
    :return: revised set of words, with words that make `tokenize` perform\
        badly removed
    :rtype: Set[str]

    :Example::
    ::

        from pythainlp.corpus import thai_words
        from pythainlp.corpus.util import revise_wordset
        from pythainlp.tokenize.longest import segment
        from pythainlp.util import Trie

        base_words = thai_words()
        more_words = {
            "ถวิล อุดล", "ทองอินทร์ ภูริพัฒน์", "เตียง ศิริขันธ์", "จำลอง ดาวเรือง"
        }
        base_words = base_words.union(more_words)
        dict_trie = Trie(base_words)

        tokenize = lambda text: segment(text, dict_trie)

        training_data = [
            [str, str, str, ...],
            [str, str, str, ...],
            ...
        ]

        revised_words = revise_wordset(tokenize, base_words, training_data)
    """
    bad_words = find_badwords(tokenize, training_data)
    # Keep only words that were not flagged as problematic.
    return set(orig_words) - bad_words
def revise_newmm_default_wordset(
    training_data: Iterable[Iterable[str]],
) -> Set[str]:
    """
    Revise a set of words that could improve tokenization performance of
    `pythainlp.tokenize.newmm`, a dictionary-based tokenizer and the default
    tokenizer for PyThaiNLP.

    Words from `pythainlp.corpus.thai_words()` will be used as a base set
    for the dictionary. Words that do not perform well with `training_data`
    will be removed. The remaining words will be returned.

    :param Iterable[Iterable[str]] training_data: tokenized text, to be used\
        as a training set
    :return: revised set of words, with words that make the tokenizer\
        perform badly removed
    :rtype: Set[str]
    """
    orig_words = thai_words()
    trie = Trie(orig_words)

    def tokenize(text: str) -> List[str]:
        # Adapt newmm.segment to the Callable[[str], List[str]]
        # signature expected by revise_wordset().
        return newmm.segment(text, custom_dict=trie)

    return revise_wordset(tokenize, orig_words, training_data)