import argparse
import os
import shutil

from transformers import T5Tokenizer
from tqdm import tqdm


def parse():
    parser = argparse.ArgumentParser()

    parser.add_argument("--input-dir", type=str)
    parser.add_argument("--output-dir", type=str)
    parser.add_argument("--max-src-length", type=int, default=512)
    parser.add_argument("--max-dialogue-history-len", type=int, default=256)
    parser.add_argument("--tokenizer-path", type=str)
    parser.add_argument("--special-tokens-file", type=str, default=None)
    parser.add_argument(
        "--truncate-side", type=str, default="left", choices=["left", "right"]
    )

    return parser.parse_args()


def truncate(args):
    # Tokenizers with `truncation_side` fixed to "left" / "right" do the actual
    # cutting; the default tokenizer handles token counting, encoding, and decoding.
    # (`truncation_side` controls which end is dropped when truncation=True.)
    left_tokenizer = T5Tokenizer.from_pretrained(
        args.tokenizer_path, truncation_side="left"
    )
    right_tokenizer = T5Tokenizer.from_pretrained(
        args.tokenizer_path, truncation_side="right"
    )
    tokenizer = T5Tokenizer.from_pretrained(args.tokenizer_path)
|
|
| if args.special_tokens_file is not None: |
| with open(args.special_tokens_file, "r") as reader: |
| special_tokens_dict = { |
| "additional_special_tokens": [ |
| token.strip() for token in reader.readlines() |
| ] |
| } |
|
|
| left_tokenizer.add_special_tokens(special_tokens_dict) |
| right_tokenizer.add_special_tokens(special_tokens_dict) |
| tokenizer.add_special_tokens(special_tokens_dict) |
|
|
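
    # Encode and decode once (dropping the trailing </s>) so the text is in the
    # tokenizer's canonical form before any token counts are taken.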
    def normalize(x):
        return tokenizer.decode(tokenizer(x).input_ids[:-1])
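
    # A source line is expected to look like
    #   "<prefix>] <dialogue history> [EK] <knowledge> [C] <instruction>",
    # where the prefix ends at the first "]" and "[C]" marks the final instruction.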
    def divide_chunks(src):
        prefix, postfix = src.split("]", 1)
        prefix = prefix + "]"

        knowledge_start_index = postfix.index("[EK]")
        dialogue = postfix[: knowledge_start_index - 1]
        knowledge_and_instruction = postfix[knowledge_start_index - 1 :]

        instruction_start_index = knowledge_and_instruction.rfind("[C]")
        knowledge = knowledge_and_instruction[: instruction_start_index - 1]
        instruction = knowledge_and_instruction[instruction_start_index - 1 :]

        return prefix, dialogue, knowledge, instruction

    def token_num(x):
        return len(tokenizer.tokenize(x))
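
    # Smallest possible knowledge chunk; the dialogue history must always
    # leave at least this much room.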
    min_knowledge_len = token_num(" [EK] None")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    print(f" {os.path.basename(args.input_dir)} ".center(70, "="))
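
    # *.src files are truncated; every other file or directory is copied over unchanged.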
    for filename in os.listdir(args.input_dir):
        if not filename.endswith(".src"):
            filepath = os.path.join(args.input_dir, filename)
            if not os.path.exists(os.path.join(args.output_dir, filename)):
                if os.path.isfile(filepath):
                    shutil.copyfile(
                        os.path.join(args.input_dir, filename),
                        os.path.join(args.output_dir, filename),
                    )
                else:
                    shutil.copytree(
                        os.path.join(args.input_dir, filename),
                        os.path.join(args.output_dir, filename),
                    )
        else:
            dialogue_cut_num = 0
            knowledge_cut_num = 0
            cut_token_num = 0

            with open(os.path.join(args.input_dir, filename), "r") as reader, open(
                os.path.join(args.output_dir, filename), "w"
            ) as writer:
                for line in tqdm(reader.readlines()):
                    src = line.strip()
                    src = normalize(src)

                    prefix, dialogue, knowledge, instruction = divide_chunks(src)

                    prefix_token_num = token_num(prefix)
                    dialogue_token_num = token_num(dialogue)
                    knowledge_token_num = token_num(knowledge)
                    instruction_token_num = token_num(instruction)
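
                    # The prefix and instruction are never truncated, so together
                    # they must already fit within the source-length budget.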
                    assert (
                        args.max_src_length >= prefix_token_num + instruction_token_num
                    )

                    origin_src_token_num = (
                        prefix_token_num
                        + dialogue_token_num
                        + knowledge_token_num
                        + instruction_token_num
                    )
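
                    # Truncate only when the full source exceeds the budget.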
                    if origin_src_token_num > args.max_src_length:
                        # Budget left for dialogue history + knowledge once the
                        # prefix and instruction are reserved.
                        left_token_num = (
                            args.max_src_length
                            - prefix_token_num
                            - instruction_token_num
                        )
                        # The dialogue may keep up to --max-dialogue-history-len tokens,
                        # or more if the knowledge does not need the remainder, but never
                        # so many that the minimal knowledge chunk no longer fits.
                        max_dialogue_token_num = min(
                            max(
                                args.max_dialogue_history_len,
                                left_token_num - knowledge_token_num,
                            ),
                            left_token_num - min_knowledge_len,
                        )
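
                        # Cut the dialogue history first, from the side chosen by --truncate-side.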
                        if dialogue_token_num > max_dialogue_token_num:
                            truncate_tokenizer = (
                                left_tokenizer
                                if args.truncate_side == "left"
                                else right_tokenizer
                            )
                            # max_length includes the </s> the tokenizer appends,
                            # which is stripped again before decoding.
                            dialogue_ids = truncate_tokenizer(
                                dialogue,
                                max_length=max_dialogue_token_num + 1,
                                truncation=True,
                            ).input_ids

                            dialogue = tokenizer.decode(dialogue_ids[:-1])
                            dialogue_token_num = max_dialogue_token_num
                            dialogue_cut_num += 1
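
                        # Whatever budget the dialogue leaves over goes to the
                        # knowledge, which is always cut from the right.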
                        if knowledge_token_num > left_token_num - dialogue_token_num:
                            knowledge_ids = right_tokenizer(
                                knowledge,
                                max_length=left_token_num - dialogue_token_num + 1,
                                truncation=True,
                            ).input_ids

                            knowledge = tokenizer.decode(knowledge_ids[:-1])

                            # Prepend a space so the knowledge chunk keeps its
                            # original " [EK]" prefix after decoding.
                            knowledge = " " + knowledge

                            knowledge_token_num = left_token_num - dialogue_token_num
                            knowledge_cut_num += 1
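
                    # Reassemble the (possibly truncated) chunks into a single source line.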
                    src = (
                        prefix.strip()
                        + " "
                        + dialogue.strip()
                        + " "
                        + knowledge.strip()
                        + " "
                        + instruction.strip()
                    )

                    src_token_num = token_num(src)

                    cut_token_num += origin_src_token_num - src_token_num
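
                    # Re-split and re-count the rebuilt line before writing it out.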
                    prefix, dialogue, knowledge, instruction = divide_chunks(src)

                    prefix_token_num = token_num(prefix)
                    dialogue_token_num = token_num(dialogue)
                    knowledge_token_num = token_num(knowledge)
                    instruction_token_num = token_num(instruction)

                    writer.write(src + "\n")

            print(f" {filename} ".center(40, "-"))
            print(f"dialogue cut num: {dialogue_cut_num}")
            print(f"knowledge cut num: {knowledge_cut_num}")
            print(f"token cut num: {cut_token_num}")


if __name__ == "__main__":
    truncate(parse())