# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 PyThaiNLP Project
# SPDX-FileType: SOURCE
# SPDX-License-Identifier: Apache-2.0
import unittest

import numpy as np
import yaml

from pythainlp.benchmarks import word_tokenization
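
# The YAML fixture provides "sentences" (expected/actual tokenisation
# pairs) and "binary_sentences" (0/1 boundary strings) used below.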
with open("./tests/data/sentences.yml", "r", encoding="utf8") as stream:
TEST_DATA = yaml.safe_load(stream)


class BenchmarksTestCaseX(unittest.TestCase):
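    # preprocessing() should clean raw text for evaluation (the input
    # here carries markup, <tag>ok</tag>); a smoke test that it returns
    # a value.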
    def test_preprocessing(self):
        self.assertIsNotNone(
            word_tokenization.preprocessing(
                txt="ทดสอบ การ ทำ ความสะอาด ข้อมูล<tag>ok</tag>"
            )
        )
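
    # benchmark() compares a reference segmentation (first argument)
    # against a candidate one, both given as lists of tokens.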
    def test_benchmark_not_none(self):
        self.assertIsNotNone(
            word_tokenization.benchmark(
                ["วัน", "จัน", "ทร์", "สี", "เหลือง"],
                ["วัน", "จันทร์", "สี", "เหลือง"],
            )
        )
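
    # _binary_representation() marks each character with 1 at a word
    # start and 0 elsewhere; "อากาศ|ร้อน|มาก|ครับ" has words of 5, 4, 3
    # and 4 characters, hence 1s at indices 0, 5, 9 and 12.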
    def test_binary_representation(self):
        sentence = "อากาศ|ร้อน|มาก|ครับ"
        rept = word_tokenization._binary_representation(sentence)
        self.assertEqual(
            [1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0], rept.tolist()
        )
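
    # compute_stats() scores a single preprocessed expected/actual pair;
    # every fixture pair should yield a result.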
    def test_compute_stats(self):
        for pair in TEST_DATA["sentences"]:
            exp, act = pair["expected"], pair["actual"]
            result = word_tokenization.compute_stats(
                word_tokenization.preprocessing(exp),
                word_tokenization.preprocessing(act),
            )
            self.assertIsNotNone(result)
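
    # benchmark() over the whole fixture set at once; the name df
    # suggests a tabular (DataFrame-like) summary, but this test only
    # asserts that something comes back.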
    def test_benchmark(self):
        expected = []
        actual = []
        for pair in TEST_DATA["sentences"]:
            expected.append(pair["expected"])
            actual.append(pair["actual"])

        df = word_tokenization.benchmark(expected, actual)
        self.assertIsNotNone(df)
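
    # Each binary fixture is a string of 0/1 characters; word boundaries
    # are recovered from both segmentations and the count of correctly
    # tokenised words is checked against the fixture's expected_count.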
    def test_count_correctly_tokenised_words(self):
        for d in TEST_DATA["binary_sentences"]:
            # binary {0, 1} strings -> int arrays
            sample = np.array(list(d["actual"])).astype(int)
            ref_sample = np.array(list(d["expected"])).astype(int)

            sb = list(word_tokenization._find_word_boundaries(sample))
            rb = list(word_tokenization._find_word_boundaries(ref_sample))

            correctly_tokenized_words = (
                word_tokenization._find_words_correctly_tokenised(rb, sb)
            )
            self.assertEqual(
                np.sum(correctly_tokenized_words), d["expected_count"]
            )
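
    # _find_words_correctly_tokenised(ref, sample) labels each word span
    # of the sample with 1 iff the same (start, end) span exists in the
    # reference; here only (10, 12) matches, giving "01".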
    def test_words_correctly_tokenised(self):
        r = [(0, 2), (2, 10), (10, 12)]
        s = [(0, 10), (10, 12)]
        expected = "01"

        labels = word_tokenization._find_words_correctly_tokenised(r, s)
        self.assertEqual(expected, "".join(np.array(labels).astype(str)))
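
    # _flatten_result() flattens a nested dict into "outer:inner" keys.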
    def test_flatten_result(self):
        result = {"key1": {"v1": 6}, "key2": {"v2": 7}}
        actual = word_tokenization._flatten_result(result)
        self.assertEqual(actual, {"key1:v1": 6, "key2:v2": 7})