Adapt tokenization_interns1.py to transformers>=5.0.0 (#15)
Browse files
- Adapt tokenization_interns1.py to transformers>=5.0.0 (65efc111527b17792b13f09fbe7ddbb9d9df263e)
- Update: fix bug (7260346cf2e4d0d8faf80b45ff37a06a14e4b74a)
- Update: fix bug (7b33977d07711c291f31851e21f44f995dc74c3f)
Co-authored-by: Kevin Zhang <Zhangyc02@users.noreply.huggingface.co>
- tokenization_interns1.py +8 -6
tokenization_interns1.py
CHANGED
Diff (unified format):

@@ -24,11 +24,14 @@ from functools import lru_cache
 import regex as re
 import sentencepiece as spm
 
+import transformers
 from transformers.tokenization_utils_base import AddedToken, TextInput
-from transformers.tokenization_utils import PreTrainedTokenizer
 from transformers.utils import logging
-
-
+from packaging import version
+if version.parse(transformers.__version__) >= version.parse("5.0.0"):
+    from transformers.tokenization_python import PreTrainedTokenizer
+else:
+    from transformers.tokenization_utils import PreTrainedTokenizer
 
 logger = logging.get_logger(__name__)
 
@@ -506,6 +509,7 @@ class InternS1Tokenizer(PreTrainedTokenizer):
         pad_token="<|endoftext|>",
         clean_up_tokenization_spaces=False,
         split_special_tokens=False,
+        special_tokens_pattern="none",
         **kwargs,
     ):
         bos_token = (
@@ -566,6 +570,7 @@ class InternS1Tokenizer(PreTrainedTokenizer):
             pad_token=pad_token,
             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
             split_special_tokens=split_special_tokens,
+            special_tokens_pattern="none",
             **kwargs,
         )
 
@@ -715,9 +720,6 @@ class InternS1Tokenizer(PreTrainedTokenizer):
 
         text, kwargs = self.prepare_for_tokenization(text, **kwargs)
 
-        if kwargs:
-            logger.warning(f"Keyword arguments {kwargs} not recognized.")
-
        if hasattr(self, "do_lower_case") and self.do_lower_case:
            # convert non-special tokens to lowercase. Might be super slow as well?
            escaped_special_toks = [re.escape(s_tok) for s_tok in (self.all_special_tokens)]
Resulting file (after the change), with new line numbers:

24  import regex as re
25  import sentencepiece as spm
26
27  import transformers
28  from transformers.tokenization_utils_base import AddedToken, TextInput
29  from transformers.utils import logging
30  from packaging import version
31  if version.parse(transformers.__version__) >= version.parse("5.0.0"):
32      from transformers.tokenization_python import PreTrainedTokenizer
33  else:
34      from transformers.tokenization_utils import PreTrainedTokenizer
35
36  logger = logging.get_logger(__name__)
37

509          pad_token="<|endoftext|>",
510          clean_up_tokenization_spaces=False,
511          split_special_tokens=False,
512          special_tokens_pattern="none",
513          **kwargs,
514      ):
515          bos_token = (

570              pad_token=pad_token,
571              clean_up_tokenization_spaces=clean_up_tokenization_spaces,
572              split_special_tokens=split_special_tokens,
573              special_tokens_pattern="none",
574              **kwargs,
575          )
576

720
721          text, kwargs = self.prepare_for_tokenization(text, **kwargs)
722
723          if hasattr(self, "do_lower_case") and self.do_lower_case:
724              # convert non-special tokens to lowercase. Might be super slow as well?
725              escaped_special_toks = [re.escape(s_tok) for s_tok in (self.all_special_tokens)]