# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 PyThaiNLP Project
# SPDX-FileType: SOURCE
# SPDX-License-Identifier: Apache-2.0
"""
Universal Language Model Fine-tuning for Text Classification (ULMFiT).
Code by Charin Polpanumas
https://github.com/cstorm125/thai2fit/
Some pre-processing functions are from fastai (Apache 2.0)
https://github.com/fastai/fastai/blob/master/fastai/text/transform.py
Universal Language Model Fine-tuning for Text Classification
https://arxiv.org/abs/1801.06146
"""
__all__ = [
"THWIKI_LSTM",
"ThaiTokenizer",
"document_vector",
"merge_wgts",
"post_rules_th",
"post_rules_th_sparse",
"pre_rules_th",
"pre_rules_th_sparse",
"process_thai",
"fix_html",
"lowercase_all",
"remove_space",
"replace_rep_after",
"replace_rep_nonum",
"replace_url",
"replace_wrep_post",
"replace_wrep_post_nonum",
"rm_brackets",
"rm_useless_newlines",
"rm_useless_spaces",
"spec_add_spaces",
"ungroup_emoji",
]
from pythainlp.ulmfit.core import (
THWIKI_LSTM,
document_vector,
merge_wgts,
post_rules_th,
post_rules_th_sparse,
pre_rules_th,
pre_rules_th_sparse,
process_thai,
)
from pythainlp.ulmfit.preprocess import (
fix_html,
lowercase_all,
remove_space,
replace_rep_after,
replace_rep_nonum,
replace_url,
replace_wrep_post,
replace_wrep_post_nonum,
rm_brackets,
rm_useless_newlines,
rm_useless_spaces,
spec_add_spaces,
ungroup_emoji,
)
from pythainlp.ulmfit.tokenizer import ThaiTokenizer
|