| ''' |
| BERT Tokenizer custom for Math |
| ''' |
|
|
| import os |
| import re |
from const import LATEX_VOC  # local module providing the LaTeX math vocabulary
| from shutil import copyfile |
| from typing import List, Optional, Tuple |
|
|
| from transformers.tokenization_utils import PreTrainedTokenizer |
| from transformers.utils import logging |
|
|
| logger = logging.get_logger(__name__) |
|
|
| VOCAB_FILES_NAMES = { |
| 'vocab_file': 'vocab.txt', |
| 'merges_file': 'bpe.codes', |
| } |
|
|
| PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { |
| "vinai/phobert-base": 256, |
| "vinai/phobert-large": 256, |
| } |
|
|
|
|
| def get_pairs(word): |
| ''' |
| Return set of symbol pairs in a word. |
| |
| Word is represented as tuple of symbols (symbols being variable-length strings). |
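
    Example (a minimal illustration; `</w>` marks the end of a word):
        get_pairs(("l", "o", "w</w>")) -> {("l", "o"), ("o", "w</w>")}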
| ''' |
|
|
| pairs = set() |
| prev_char = word[0] |
| for char in word[1:]: |
| pairs.add((prev_char, char)) |
| prev_char = char |
|
|
    return pairs
|
|
|
|
| class PhobertTokenizer(PreTrainedTokenizer): |
| ''' |
    Construct a PhoBERT-style tokenizer based on Byte-Pair Encoding, extended here with a LaTeX math vocabulary.
| This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to |
| this superclass for more information regarding those methods. |
| |
| |
| |
| Args: |
| vocab_file (`str`): |
| Path to the vocabulary file. |
| merges_file (`str`): |
| Path to the merges file. |
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
| <Tip> |
| When building a sequence using special tokens, this is not the token that is used for the beginning of |
| sequence. The token used is the `cls_token`. |
| </Tip> |
| eos_token (`str`, *optional*, defaults to `"</s>"`): |
| The end of sequence token. |
| <Tip> |
| When building a sequence using special tokens, this is not the token that is used for the end of sequence. |
| The token used is the `sep_token`. |
| </Tip> |
| sep_token (`str`, *optional*, defaults to `"</s>"`): |
| The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for |
| sequence classification or for a text and a question for question answering. It is also used as the last |
| token of a sequence built with special tokens. |
| cls_token (`str`, *optional*, defaults to `"<s>"`): |
| The classifier token which is used when doing sequence classification (classification of the whole sequence |
| instead of per-token classification). It is the first token of the sequence when built with special tokens. |
| unk_token (`str`, *optional*, defaults to `"<unk>"`): |
| The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this |
| token instead. |
| pad_token (`str`, *optional*, defaults to `"<pad>"`): |
| The token used for padding, for example when batching sequences of different lengths. |
| mask_token (`str`, *optional*, defaults to `"<mask>"`): |
| The token used for masking values. This is the token used when training this model with masked language |
| modeling. This is the token which the model will try to predict. |
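
    Example (hypothetical file paths, named after `VOCAB_FILES_NAMES`):

        tokenizer = PhobertTokenizer("vocab.txt", "bpe.codes")
        tokens = tokenizer.tokenize("x ^ { 2 }")
        ids = tokenizer.convert_tokens_to_ids(tokens)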
| ''' |
| vocab_files_names = VOCAB_FILES_NAMES |
| max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES |
| model_input_names = ["input_ids", "attention_mask"] |
|
|
| def __init__(self, |
| vocab_file, |
| merges_file, |
| bos_token="<s>", |
| eos_token="</s>", |
| sep_token="</s>", |
| cls_token="<s>", |
| unk_token="<unk>", |
| pad_token="<pad>", |
| mask_token="<mask>", |
| **kwargs): |
| super().__init__( |
| bos_token=bos_token, |
| eos_token=eos_token, |
| unk_token=unk_token, |
| sep_token=sep_token, |
| cls_token=cls_token, |
| pad_token=pad_token, |
| mask_token=mask_token, |
| **kwargs, |
| ) |
|
|
| self.vocab_file = vocab_file |
| self.merges_file = merges_file |
|
|
        # Special tokens take the first four ids, following the fairseq
        # dictionary layout used by PhoBERT.
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
|
|
        # Load the LaTeX symbols and the file vocabulary, then append <mask>
        # as the last id, as in the original PhoBERT tokenizer.
        self.add_from_file(vocab_file)
        self.encoder[self.mask_token] = len(self.encoder)
|
|
| self.decoder = {v : k for k, v in self.encoder.items()} |
|
|
|
|
        # Each merge line's trailing field (a frequency count) is dropped;
        # a merge's rank is simply its line position in the file.
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
| |
| self.cache = {} |
|
|
| def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]: |
| |
| """ |
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
| adding special tokens. A PhoBERT sequence has the following format: |
| - single sequence: `<s> X </s>` |
| - pair of sequences: `<s> A </s></s> B </s>` |
| Args: |
| token_ids_0 (`List[int]`): |
| List of IDs to which the special tokens will be added. |
| token_ids_1 (`List[int]`, *optional*): |
| Optional second list of IDs for sequence pairs. |
| Returns: |
| `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. |
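
        Example (ids 5 and 6 are placeholders; with this tokenizer's fixed special
        ids, `cls_token_id` is 0 and `sep_token_id` is 2):
            build_inputs_with_special_tokens([5, 6])         -> [0, 5, 6, 2]
            build_inputs_with_special_tokens([5, 6], [7, 8]) -> [0, 5, 6, 2, 2, 7, 8, 2]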
| """ |
|
|
| if token_ids_1 is None: |
| |
| return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] |
| cls = [self.cls_token_id] |
| sep = [self.sep_token_id] |
| return cls + token_ids_0 + sep + sep + token_ids_1 + sep |
| |
| |
| def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]: |
|
|
| """ |
| Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding |
| special tokens using the tokenizer `prepare_for_model` method. |
| Args: |
| token_ids_0 (`List[int]`): |
| List of IDs. |
| token_ids_1 (`List[int]`, *optional*): |
| Optional second list of IDs for sequence pairs. |
| already_has_special_tokens (`bool`, *optional*, defaults to `False`): |
| Whether or not the token list is already formatted with special tokens for the model. |
| Returns: |
| `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. |
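
        Example (ids are placeholders):
            get_special_tokens_mask([5, 6])         -> [1, 0, 0, 1]
            get_special_tokens_mask([5, 6], [7, 8]) -> [1, 0, 0, 1, 1, 0, 0, 1]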
| """ |
|
|
| if already_has_special_tokens: |
| return super().get_special_tokens_mask( |
| token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True |
| ) |
|
|
| if token_ids_1 is None: |
| return [1] + ([0] * len(token_ids_0)) + [1] |
| return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] |
| |
|
|
| def create_token_type_ids_from_sequences( |
| self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None |
| ) -> List[int]: |
| """ |
| Create a mask from the two sequences passed to be used in a sequence-pair classification task. PhoBERT does not |
| make use of token type ids, therefore a list of zeros is returned. |
| Args: |
| token_ids_0 (`List[int]`): |
| List of IDs. |
| token_ids_1 (`List[int]`, *optional*): |
| Optional second list of IDs for sequence pairs. |
| Returns: |
| `List[int]`: List of zeros. |
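
        Example (ids are placeholders; the mask covers the added special tokens too):
            create_token_type_ids_from_sequences([5, 6])         -> [0, 0, 0, 0]
            create_token_type_ids_from_sequences([5, 6], [7, 8]) -> [0, 0, 0, 0, 0, 0, 0, 0]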
| """ |
|
|
| sep = [self.sep_token_id] |
| cls = [self.cls_token_id] |
|
|
| if token_ids_1 is None: |
| return len(cls + token_ids_0 + sep) * [0] |
| return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] |
|
|
| @property |
| def vocab_size(self): |
| return len(self.encoder) |
|
|
| def get_vocab(self): |
| return dict(self.encoder, **self.added_tokens_encoder) |
| |
|
|
|
|
    def bpe(self, token):
        """
        Apply Byte-Pair Encoding to a single whitespace-delimited token.

        The token is split into characters, the last character is marked with
        `</w>`, and the best-ranked merge from `self.bpe_ranks` is applied
        repeatedly until no learned merge applies. Sub-word boundaries are
        written with the `@@ ` continuation marker, so with suitable merges
        `bpe("low")` could return `"l@@ ow"`.
        """
| if token in self.cache: |
| return self.cache[token] |
        word = tuple(token)
        # Mark the end of the word so word-final and word-internal symbols
        # are treated as distinct merge candidates.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
|
|
| if not pairs: |
| return token |
|
|
| while True: |
| bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) |
| if bigram not in self.bpe_ranks: |
| break |
| first, second = bigram |
| new_word = [] |
| i = 0 |
| while i < len(word): |
| try: |
| j = word.index(first, i) |
| except ValueError: |
| new_word.extend(word[i:]) |
| break |
| else: |
| new_word.extend(word[i:j]) |
| i = j |
|
|
| if word[i] == first and i < len(word) - 1 and word[i + 1] == second: |
| new_word.append(first + second) |
| i += 2 |
| else: |
| new_word.append(word[i]) |
| i += 1 |
| new_word = tuple(new_word) |
| word = new_word |
| if len(word) == 1: |
| break |
| else: |
| pairs = get_pairs(word) |
| word = "@@ ".join(word) |
| word = word[:-4] |
| self.cache[token] = word |
| return word |
|
|
| def _tokenize(self, text): |
| """Tokenize a string.""" |
| split_tokens = [] |
|
|
| words = re.findall(r"\S+\n?", text) |
|
|
| for token in words: |
| split_tokens.extend([t for t in self.bpe(token).split(" ")]) |
| return split_tokens |
|
|
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
|
|
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
|
|
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string, e.g. ['l@@', 'ow'] -> 'low'."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
|
|
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and merges files to `save_directory`, copying the
        original files when available and rebuilding them from memory otherwise.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
|
|
| out_vocab_file = os.path.join( |
| save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] |
| ) |
|
|
| if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): |
| copyfile(self.vocab_file, out_vocab_file) |
        elif not os.path.isfile(self.vocab_file):
            # Rebuild the vocab file from memory; "1" is a placeholder count
            # matching the "<token> <cnt>" format expected by add_from_file.
            with open(out_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.encoder:
                    if token not in self.all_special_tokens:
                        fp.write(f"{token} 1\n")
|
|
| out_merges_file = os.path.join( |
| save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] |
| ) |
|
|
| if os.path.abspath(self.merges_file) != os.path.abspath(out_merges_file) and os.path.isfile(self.merges_file): |
| copyfile(self.merges_file, out_merges_file) |
| elif not os.path.isfile(self.merges_file): |
| index = 0 |
| with open(out_merges_file, "w", encoding="utf-8") as writer: |
| for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): |
| if index != token_index: |
| logger.warning( |
| f"Saving vocabulary to {out_merges_file}: BPE merge indices are not consecutive." |
| " Please check that the tokenizer is not corrupted!" |
| ) |
| index = token_index |
| writer.write(" ".join(bpe_tokens) + " 1\n") |
| index += 1 |
|
|
        return (out_vocab_file, out_merges_file)


    def add_from_file(self, f):
| """ |
| Loads a pre-existing dictionary from a text file and adds its symbols to this instance. |
| """ |
|
|
        # Seed the vocabulary with the LaTeX math symbols from const.py.
        # Skip symbols that are already present: this method recurses once
        # when given a path, and unguarded re-insertion would corrupt the ids.
        for word in LATEX_VOC:
            if word not in self.encoder:
                self.encoder[word] = len(self.encoder)
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
|
|
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            # Ignore duplicates: re-assigning an existing key would hand out
            # an id that collides with the next insertion.
            if word not in self.encoder:
                self.encoder[word] = len(self.encoder)
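

# ---------------------------------------------------------------------------
# Minimal smoke test. The file names below are assumptions mirroring
# VOCAB_FILES_NAMES; point them at a real vocab/merges pair before running.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tokenizer = PhobertTokenizer("vocab.txt", "bpe.codes")

    text = "x ^ { 2 } + y = 0"
    tokens = tokenizer.tokenize(text)  # whitespace split followed by BPE
    ids = tokenizer.convert_tokens_to_ids(tokens)
    print(tokens)
    print(ids)

    # Round-trip: convert_tokens_to_string removes the "@@ " markers.
    print(tokenizer.convert_tokens_to_string(tokens))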
|
|