import argparse
import json
import os
import sys

import tqdm
import transformers


def parse_args():
    parser = argparse.ArgumentParser(description="Merge the boards into a single train set.")
    parser.add_argument("-t", "--tokenizer-name-or-path", default="meta-llama/Llama-3.1-8B", help="The name or path for the tokenizer")
    parser.add_argument("-l", "--limit", type=int, default=4096, help="Length limit in tokens for each post")
    parser.add_argument("-m", "--min", type=int, default=5, help="Minimum number of messages in each post")
    parser.add_argument("-i", "--id", default="<|start_header_id|>", help="Prefix token for message IDs")
    parser.add_argument("-c", "--content", default="<|end_header_id|>", help="Prefix token for message contents")
    return parser.parse_args()


def main():
    args = parse_args()
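    # Load the tokenizer so post lengths can be measured in tokens.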
    tokenizer = transformers.AutoTokenizer.from_pretrained(args.tokenizer_name_or_path, legacy=True)
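    # Warn when a prefix string is not a single token in the tokenizer's vocabulary.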
    if args.id not in tokenizer.vocab:
        print(f"The message ID prefix \"{args.id}\" is not a token in \"{args.tokenizer_name_or_path}\". It will still work, but an existing tokenizer token is preferable.")
    if args.content not in tokenizer.vocab:
        print(f"The message content prefix \"{args.content}\" is not a token in \"{args.tokenizer_name_or_path}\". It will still work, but an existing tokenizer token is preferable.")
    boards_dir = "boards"
    total_token_count = 0
    with open("merged_strings_train.jsonl", "w", encoding="utf8") as output:
        for board_path in tqdm.tqdm(os.listdir(boards_dir), desc="Boards"):
            board_name, ext = os.path.splitext(board_path)
            if ext != ".json":
                continue
            board_path = os.path.join(boards_dir, board_path)
            if not os.path.isfile(board_path):
                continue
            with open(board_path, "r", encoding="utf8") as f:
                board = json.load(f)
            for post in tqdm.tqdm(board, desc="Posts"):
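                # Skip posts that have fewer than --min messages.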
                if len(post) < args.min:
                    continue
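                # Seed the sample with the board name; the +2 reserves room for
                # the two special tokens (presumably BOS and EOS) added at
                # training time.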
                post_content = board_name
                post_token_count = len(tokenizer.encode(post_content, add_special_tokens=False)) + 2
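                # Greedily append formatted messages until the token limit would be exceeded.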
                for message in post:
                    formatted = f"{args.id}{message['id']}{args.content}{message['content']}"
                    formatted_token_count = len(tokenizer.encode(formatted, add_special_tokens=False))
                    added_token_count = post_token_count + formatted_token_count
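                    # Truncate the post rather than emit an over-long sample.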
                    if added_token_count > args.limit:
                        break
                    post_content += formatted
                    post_token_count = added_token_count
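                # Write one JSON object per line (JSONL) with an empty "input" field.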
                json.dump({"input": "", "output": post_content}, output, ensure_ascii=False)
                output.write("\n")
                total_token_count += post_token_count
    print("Merge finished, total token count:", total_token_count)


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\nScript interrupted by user, exiting...")
        sys.exit(1)