"""Generate synthetic Chinese Spelling Correction (CSC) training data.

Each input sentence receives at most one corruption (word repetition,
character deletion, sound-alike or shape-alike substitution) and is written
out as a JSON line holding the corrupted source, the clean target, and a
binary label marking whether a corruption was actually applied.
"""

import argparse
import json
import random

from ltp import LTP
from tqdm import tqdm

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", type=str, required=True)
    parser.add_argument("--output", type=str, required=True)
    parser.add_argument("--ltp_model", type=str, required=True)
    parser.add_argument("--basic_hanzi", type=str, default="confusion/basic_hanzi_2500.txt")
    parser.add_argument("--sound_confusion", type=str, default="confusion/sound_confusion.txt")
    parser.add_argument("--shape_confusion", type=str, default="confusion/shape_confusion.txt")
    # Corruption mix; the five ratios must sum to 1.0 (checked in do_mask).
    parser.add_argument("--same_ratio", type=float, default=0.1)
    parser.add_argument("--repeat_ratio", type=float, default=0.15)
    parser.add_argument("--delete_ratio", type=float, default=0.15)
    parser.add_argument("--sound_ratio", type=float, default=0.5)
    parser.add_argument("--shape_ratio", type=float, default=0.1)
    # Characters that are never corrupted (numerals by default).
    parser.add_argument("--whitelist", type=str, default="一二三四五六七八九十")
    parser.add_argument("--seed", type=int, default=42)
    args = parser.parse_args()
    return args
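
# Example invocation (the script name, paths, and model identifier below are
# illustrative, not shipped defaults):
#   python generate_csc_data.py --input corpus.txt --output train.jsonl \
#       --ltp_model LTP/small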


def isChinese(word):
    """Return True when every character is a CJK unified ideograph."""
    for ch in word:
        if not (0x4E00 <= ord(ch) <= 0x9FA5):
            return False
    return True
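
# For example, isChinese("高兴") is True while isChinese("high兴") is False;
# the check covers only the CJK Unified Ideographs range U+4E00..U+9FA5.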


def load_hanzi(path):
    """Load the basic hanzi vocabulary: one character per line."""
    hanzi = set()
    with open(path, mode="r", encoding="utf-8") as handle:
        for line in handle:
            line = line.strip()
            assert len(line) == 1, f"expected one character per line, got {line!r}"
            hanzi.add(line)
    return hanzi


def load_confusion_set(path, hanzi):
    """Load a confusion dict mapping a character to its confusable characters.

    Each line pairs a key character with one token of candidate characters.
    Candidates outside the basic hanzi set, duplicates, and the key itself
    are filtered out; keys with no surviving candidates are dropped.
    """
    confusion_set = {}
    with open(path, mode="r", encoding="utf-8") as handle:
        for line in handle:
            fields = line.strip().split()
            if len(fields) < 2:
                continue
            key, val = fields[0], []
            for c in fields[1]:
                if c in hanzi and c not in val and c != key:
                    val.append(c)
            if val:
                confusion_set[key] = val
    return confusion_set
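
# The loader assumes a layout like the following (this line is illustrative;
# the real files live under confusion/):
#   他 她它塔
# which yields confusion_set["他"] == ["她", "它", "塔"], assuming all three
# candidates appear in the basic hanzi set.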


def do_mask(sent, args):
    """Apply at most one corruption to `sent` and return the result."""
    cws, pos = args.ltp.pipeline(sent, tasks=["cws", "pos"], return_dict=False)

    # Pick a random word; retry once if it is not purely Chinese.
    n = len(cws)
    i = random.choice(range(n))
    word = cws[i]
    if not isChinese(word):
        i = random.choice(range(n))
        word = cws[i]
        if not isChinese(word):
            return sent

    # Sample the corruption type against the cumulative ratio thresholds.
    p = random.random()
    p1 = args.same_ratio
    p2 = p1 + args.repeat_ratio
    p3 = p2 + args.delete_ratio
    p4 = p3 + args.sound_ratio
    p5 = p4 + args.shape_ratio
    assert abs(p5 - 1) < 0.001, "the five ratios must sum to 1"
    if p < p1:
        # Keep the sentence unchanged (negative example).
        return sent
    if p < p2:
        # Duplicate the word in place.
        cws[i] += word
        return ''.join(cws)
    # Never corrupt person names (nh) or place names (ns).
    if pos[i] in ['nh', 'ns']:
        return sent
    chars = list(word)
    k = random.choice(range(len(word)))
    c = chars[k]
    if c in args.whitelist:
        return sent
    if p < p3:
        # Delete one character, but only from multi-character words.
        if len(word) < 2:
            return sent
        chars[k] = ''
    elif p < p4:
        # Swap in a sound-alike character, if one is known.
        if c in args.sound_set:
            chars[k] = random.choice(args.sound_set[c])
    else:
        # Swap in a shape-alike character, if one is known.
        if c in args.shape_set:
            chars[k] = random.choice(args.shape_set[c])
    cws[i] = ''.join(chars)
    return ''.join(cws)
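
# Sketch of the possible outcomes for sent = "我很高兴" (picks are random and
# the substituted characters are illustrative, not guaranteed set members):
#   same   -> "我很高兴"      (unchanged, label 0)
#   repeat -> "我很高兴高兴"  (the word 高兴 doubled)
#   delete -> "我很高"        (one character dropped from 高兴)
#   sound  -> "我很高幸"      (sound-alike swap for 兴)
#   shape  -> "我很商兴"      (shape-alike swap for 高)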


if __name__ == "__main__":
    args = parse_args()
    random.seed(args.seed)

    # Confusion candidates are restricted to a basic, common-character vocabulary.
    hanzi = load_hanzi(args.basic_hanzi)
    args.sound_set = load_confusion_set(args.sound_confusion, hanzi)
    args.shape_set = load_confusion_set(args.shape_confusion, hanzi)
    args.hanzi = list(hanzi)

    # Word segmentation / POS tagging model.
    args.ltp = LTP(args.ltp_model)

    with open(args.input, mode="r", encoding="utf-8") as handle, \
            open(args.output, mode="w", encoding="utf-8") as output:
        for line in tqdm(handle):
            sent = line.strip()
            # Skip very short sentences.
            if len(sent) < 4:
                continue
            source = do_mask(sent, args)
            # label == 1 iff a corruption was actually applied.
            label = int(source != sent)
            output.write(
                json.dumps({"source": source, "target": sent, "label": label}, ensure_ascii=False)
            )
            output.write("\n")
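
# Each output line is one JSON object, for example (values illustrative):
#   {"source": "我很高幸", "target": "我很高兴", "label": 1}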