| import numpy as np |
| from numba import njit |
| import math |
| import random |
| import pickle |
| import gradio as gr |
| import threeletterai |
|
|
def text_to_arr(text: str):
    """Lowercase *text* and return it as a numpy array of code points."""
    return np.array(list(map(ord, text.lower())))
|
|
@njit
def longest_common_substring(s1, s2):
    """Return the longest run of positions where s1 and s2 agree.

    Despite the name, this compares only *aligned* positions: it returns
    s1[a:b] for the longest span with s1[i] == s2[i] for all a <= i < b.
    It is not a general longest-common-substring search.

    Returns an empty slice (s1[-1:-1]) when the inputs agree nowhere.
    """
    best_match_start = -1
    best_match_end = -1

    min_len = min(len(s1), len(s2))
    for i in range(min_len):
        if s1[i] == s2[i]:
            current_match_start = i
            j = 0
            # Fix: test the bound BEFORE indexing.  The original evaluated
            # s1[i+j] == s2[i+j] first and checked i+j < min_len second, so
            # it read one element past min_len; njit-compiled code does not
            # bounds-check numpy indexing, so that read returned arbitrary
            # memory and could inflate a match ending at the array edge.
            while i + j < min_len and s1[i + j] == s2[i + j]:
                j += 1
            current_match_end = current_match_start + j

            if current_match_end - current_match_start > best_match_end - best_match_start:
                best_match_start = current_match_start
                best_match_end = current_match_end

    return s1[best_match_start:best_match_end]
|
|
|
|
# Load candidate reply lines, one per line; cap at 40,000 entries to bound
# memory and the size of the vote array used by generate() below.
with open("dataset.txt", "r") as f:
    lines = f.read().rstrip("\n").split("\n")[:40000]
|
|
def not_found_in(q, data):
    """Return True when ``q`` does NOT occur as a contiguous run inside any
    sequence in ``data``.

    Works element-wise, so ``q`` and the members of ``data`` may be lists or
    numpy arrays.  An empty ``q`` is considered found in any non-empty
    ``data`` (vacuous match), and ``data == []`` always yields True.

    Fixes two defects in the original single-pass scan: it required only the
    first ``len(q) - 1`` elements to match before reporting "found"
    (``lq = len(q)-1`` off-by-one), and after a mismatch it reset the match
    counter without re-testing the current element against ``q[0]``, so
    overlapping occurrences could be missed entirely.
    """
    lq = len(q)
    for line in data:
        # Slide a window of len(q) across the line and compare element-wise.
        for start in range(len(line) - lq + 1):
            if all(line[start + k] == q[k] for k in range(lq)):
                return False
    return True
|
|
class Layer:
    """Learns recurring substrings of its inputs and emits token ids for them.

    ``common_strings`` is the learned vocabulary (a list of numpy arrays);
    a substring's position in that list is its token id.  ``previously_seen``
    is a bounded buffer of recent inputs that each new input is mined
    against for fresh common substrings.
    """
    def __init__(self, mem_len: int = 100, max_size: int = 6):
        # How many recent input arrays to retain for substring mining.
        self.mem_len = mem_len
        # Learned vocabulary: list of numpy arrays; list index == token id.
        self.common_strings = []
        # Recently seen inputs (numpy arrays), mined by __call__ when training.
        self.previously_seen = []
        # +1 so substrings of length exactly ``max_size`` still pass the
        # strict ``l < self.max_size`` test in __call__.
        self.max_size = max_size+1
    def __call__(self, input_arr, training: bool = True):
        """Tokenize ``input_arr``; when ``training``, also mine it for new
        common substrings.

        Returns a list of token ids: for every position of ``input_arr`` and
        every learned substring matching at that position, the substring's
        index is appended — overlapping matches all emit, in position order.
        """
        o = []
        li = len(input_arr)
        # Emit the id of every learned substring matching at each position.
        for i in range(li):
            for y, common_substring in enumerate(self.common_strings):
                if (i+common_substring.shape[0]) <= li and (input_arr[i:i+common_substring.shape[0]] == common_substring).all():
                    o.append(y)
        if training:
            # Find the buffered input sharing the longest aligned common run
            # with this input (runs of length >= max_size are ignored).
            current_max_len = 0
            n = None
            for i, line in enumerate(self.previously_seen):
                t = longest_common_substring(input_arr, line)
                l = len(t)
                if l > current_max_len and l < self.max_size:
                    current_max_len = l
                    n = i
                    result = t
            if self.previously_seen != []:
                # ``result`` is only read when n is not None, so it is
                # always bound here.  Runs of length 1 are too short to keep.
                if n is not None and len(result) > 1:
                    # Drop the matched buffer entry so the same pair is not
                    # mined again, then record the substring if it is new.
                    self.previously_seen.pop(n)
                    if not_found_in(result, self.common_strings):
                        self.common_strings.append(result)
            # Keep the buffer bounded.  NOTE(review): the trim happens
            # *before* the append, so the buffer can briefly hold
            # mem_len + 1 entries — presumably intentional; confirm.
            self.previously_seen = self.previously_seen[-self.mem_len:]
            self.previously_seen.append(input_arr)
        return o
|
|
# Load the pre-trained layers and the token -> line-index lookup tables.
# NOTE(review): pickle.load executes arbitrary code from the file — these
# must be trusted artifacts produced by this project's own training run.
with open("l1_large.pckl", "rb") as f: layer = pickle.load(f)
with open("l2_large.pckl", "rb") as f: layer2 = pickle.load(f)
# w / w2 appear to map token ids to dataset line indices (see generate()
# below) — confirm against the training script that produced them.
with open("w1_large.pckl", "rb") as f: w = pickle.load(f)
with open("w2_large.pckl", "rb") as f: w2 = pickle.load(f)
|
|
def generate(msg):
    """Produce a reply line for ``msg``.

    Messages shorter than 4 characters are delegated to the fallback
    ``threeletterai`` model.  Otherwise the message is tokenized by the two
    layers and every token that appears in the ``w`` / ``w2`` lookup tables
    casts one vote for a dataset line; the line with the most votes wins
    (ties resolve to the lowest index, per np.argmax).
    """
    if len(msg) < 4:
        # Too short to tokenize meaningfully; use the simple fallback model.
        return threeletterai.getresp(msg)
    tokens = layer(text_to_arr(msg), training=False)
    tokens2 = layer2(np.array(tokens), training=False)

    # One vote counter per candidate line.  Sized from the dataset instead
    # of a hard-coded 40000 so it cannot drift out of sync with the
    # dataset slice taken at load time, and widened from int16 to int64 so
    # very long inputs cannot overflow a counter.
    votes = np.zeros(len(lines), dtype=np.int64)
    for token in tokens:
        if token in w:
            votes[w[token]] += 1
    for token in tokens2:
        if token in w2:
            votes[w2[token]] += 1
    return lines[np.argmax(votes)]
|
|
# Gradio UI: a single text box in, the generated reply text out.
app = gr.Interface(fn=generate, inputs="text", outputs="text")
app.launch()