import os
import re
import time

import markdown
import nomic
import numpy as np
import pandas as pd
from nomic import atlas
from nomic.dataset import AtlasClass
from nomic.data_inference import NomicTopicOptions

from src.my_logger import setup_logger

NOMIC_KEY = os.getenv('NOMIC_KEY')
nomic.login(NOMIC_KEY)
sleep_time = int(os.getenv('NOMIC_SLEEP_TIME', 60))
logger = setup_logger(__name__)
|
|
# Matches the first subreddit reference such as "r/AskReddit"; the leading [^e]
# requires a non-'e' character before "r/", so substrings like "user/..." are skipped.
subreddit_re = re.compile(r'[^e]r/(\w+)')
|
|
|
|
def count_words(text):
    """Return the number of whitespace-separated words in text."""
    words = text.split()
    return len(words)
|
|
|
|
def preprocess_markdown(text):
    """Convert Reddit spoiler markup (>!hidden!<) into hover-to-reveal HTML spans."""
    spoiler_style = 'background-color: black; color: black;'
    hover_color = 'inherit'

    spoiler_span = (
        '<span class="spoiler" style="' + spoiler_style + '" '
        'onmouseover="this.style.color=\'' + hover_color + '\'" '
        'onmouseout="this.style.color=\'black\'">\\1</span>'
    )
    text = re.sub(r'>!(.*?)!<', spoiler_span, text)
    return text
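# Example (illustrative):
#   preprocess_markdown('The twist: >!it was a cake!<')
#   -> 'The twist: <span class="spoiler" style="background-color: black; ...">it was a cake</span>'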
|
|
|
|
def convert_markdown_to_html(text):
    """Render post markdown to HTML, auto-linking bare URLs via mdx_linkify."""
    processed_text = preprocess_markdown(text)
    html = markdown.markdown(processed_text, extensions=['mdx_linkify'])
    return html
|
|
|
|
def extract_subreddit(text):
    """Return the first subreddit mentioned in text as 'r/<name>', or '' if none is found."""
    match = subreddit_re.search(text)
    if match:
        return 'r/' + match.group(1)
    return ''
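# Example (illustrative):
#   extract_subreddit('originally posted on r/AskReddit last week')  # -> 'r/AskReddit'
#   extract_subreddit('no subreddit mentioned here')                 # -> ''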
|
|
|
|
def delete_old_nomic():
    """Delete the previously published Atlas dataset so it can be rebuilt from scratch."""
    logger.info("Trying to delete old version of nomic Atlas...")
    try:
        ac = AtlasClass()
        atlas_id = ac._get_dataset_by_slug_identifier("derek2/boru-subreddit-neural-search")['id']
        ac._delete_project_by_id(atlas_id)
        logger.info("Succeeded in deleting old version of nomic Atlas.")

        # Deletion is handled server-side, so give it time to finish before re-uploading.
        logger.info(f"Sleeping for {sleep_time}s to wait for old version deletion on the server-side")
        time.sleep(sleep_time)
    except Exception as e:
        logger.info(f"Failed to delete old version of nomic Atlas. Error: {e}")
|
|
|
|
|
|
def build_nomic(dataset):
    """Filter the dataset, add display columns, and publish a fresh Nomic Atlas map."""
    df = dataset['train'].to_pandas()

    # Drop posts flagged as NSFW or mentioning "nsfw" in any of the text fields.
    df = df[~df[['content', 'title', 'flair', 'permalink']].apply(
        lambda x: x.str.contains('nsfw', case=False, na=False)).any(axis=1) & ~df['nsfw']]

    non_embedding_columns = ['date_utc', 'title', 'flair', 'poster', 'url', 'id', 'word_count',
                             'score', 'score_percentile', 'html_content', 'subreddit']
|
|
    # Bucket scores into decile bins for filtering/coloring in the Atlas UI.
    percentiles = df['score'].quantile([0, .1, .2, .3, .4, .5, .6, .7, .8, .9]).tolist()

    # De-duplicate the bin edges (quantiles can repeat) and cap the last bin at the max score.
    bins = sorted(set(percentiles + [df['score'].max()]))

    # One label per bin, in steps of 10 (0, 10, 20, ...).
    labels = [int(i * 10) for i in range(len(bins) - 1)]

    df['score_percentile'] = pd.cut(df['score'], bins=bins, labels=labels, include_lowest=True)
|
|
    df['word_count'] = df['content'].apply(count_words)
    df['url'] = 'https://www.reddit.com' + df['permalink']
    df['html_content'] = df['content'].apply(convert_markdown_to_html)

    # Best-effort guess at the originating subreddit, parsed out of the post body.
    df['subreddit'] = df['content'].apply(extract_subreddit)
|
|
    # Build a topic model over the map, using the rendered HTML content as the label field.
    topic_options = NomicTopicOptions(build_topic_model=True)
    topic_options.topic_label_field = 'html_content'

    delete_old_nomic()
|
|
    logger.info("Trying to create new version of Atlas...")
    project = atlas.map_data(embeddings=np.stack(df['embedding'].values),
                             data=df[non_embedding_columns].to_dict(orient='records'),
                             id_field='id',
                             identifier='BORU Subreddit Neural Search',
                             topic_model=topic_options
                             )
    logger.info(f"Succeeded in creating new version of nomic Atlas: {project.slug}")
|
|