import itertools
import json
import os
import random
from concurrent.futures import Future, ThreadPoolExecutor
from datetime import datetime, timezone
from pathlib import Path
from typing import List, Optional

import pandas as pd
import urllib3
from random_word import RandomWords
from requests.utils import requote_uri
from tqdm import tqdm
from wonderwords import RandomSentence

from reddit.reddit_info import subreddit_name_l, subreddit_sort_l, subreddit_t_l
|
|
|
|
class RedditProcessor:
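    """Scrape image posts from Reddit's public JSON listings and accumulate them in a CSV file."""
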
    def get_subreddit_url(self, subreddit: str, sort_by: str = "hot", sort_time: str = "all", limit: int = 100, query: Optional[str] = None) -> str:
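        """Build a listing URL for a subreddit, or a search URL when a free-text query is given."""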
        if not query:
            return f'https://www.reddit.com/r/{subreddit}/{sort_by}/.json?raw_json=1&t={sort_time}&limit={limit}'
        # Free-text queries (random words/phrases) may contain spaces, so percent-encode the URL.
        return requote_uri(f'https://www.reddit.com/r/{subreddit}/search/.json?raw_json=1&q={query}&limit={limit}')

    def fetch_subreddit_image_entries(self, subreddit_url: str, pool_manager: urllib3.PoolManager) -> List[dict]:
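        """Fetch one listing/search URL and return a list of dicts describing the image posts it contains."""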
        result = []
        try:
            response = pool_manager.request('GET', subreddit_url)
            subreddit_data = json.loads(response.data)

            if "data" not in subreddit_data:
                return []
            if "children" not in subreddit_data["data"]:
                return []

            for content in subreddit_data['data']['children']:
                try:
                    if content['data'].get('post_hint', 'none') == 'image' and 'preview' in content['data']:
                        created_utc = datetime.fromtimestamp(content['data']["created_utc"], timezone.utc)

                        source_d = content['data']['preview']['images'][0]['source']
                        image_url, width, height = source_d['url'], source_d["width"], source_d["height"]
                        image_title = content['data']['title']
                        image_id = content['data']['id']
                        data_url = content['data']['url']
                        subreddit = content['data']['subreddit']
                        if content['data']['is_video']:
                            continue
                        result.append({
                            "image_url": image_url,
                            "title": image_title,
                            "image_id": image_id,
                            "url": data_url,
                            "subreddit": subreddit,
                            "width": width,
                            "height": height,
                            "created_utc": created_utc,
                        })
                except Exception:
                    # Skip posts with missing or malformed fields.
                    pass
            return result
        except Exception:
            # Request or JSON decoding failed; treat this URL as empty.
            return []

    def fetch_multiple_subreddit_image_entries(self, subreddit_urls: List[str], thread_pool_size: int = 5, urllib_pool_size: int = 5) -> List[dict]:
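        """Fetch several subreddit URLs concurrently and return the de-duplicated image entries."""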
        pool_manager = urllib3.PoolManager(maxsize=urllib_pool_size)
        res: List[dict] = []

        # The context manager shuts the executor down once all futures have completed.
        with ThreadPoolExecutor(thread_pool_size) as thread_pool:
            res_futs: List[Future] = [
                thread_pool.submit(self.fetch_subreddit_image_entries, subreddit_url, pool_manager)
                for subreddit_url in subreddit_urls
            ]
            for r in res_futs:
                res.extend(r.result())

        # De-duplicate by image_id.
        return list({x["image_id"]: x for x in res}.values())

    def get_random_subreddit_urls(self, num_urls: int = 20) -> List[str]:
        subr_l = list(itertools.product(subreddit_name_l, subreddit_sort_l, subreddit_t_l))
        return [self.get_subreddit_url(*xs, 100) for xs in random.sample(subr_l, k=num_urls)]
|
|
|
|
    def get_random_subreddit_query_urls(self, num_urls: int = 20, query_type: str = "chronology") -> List[str]:
        '''
        Build search URLs for randomly chosen subreddits.

        query_type:
            chronology    -- queries such as "3 months ago"
            random_word   -- a single random word per subreddit
            random_phrase -- a random sentence per subreddit
        '''
        timeline = random.choices(["days", "months", "years"], k=num_urls)
        timevalue = random.choices(range(1, 12), k=num_urls)
        subr = random.sample(subreddit_name_l, k=num_urls)

        if query_type == "chronology":
            # Pair each subreddit with one random "<n> <unit> ago" query (zip, not a full product,
            # so exactly num_urls URLs are returned).
            return [self.get_subreddit_url(subreddit=sr, query=f"{tv} {tl} ago") for sr, tl, tv in zip(subr, timeline, timevalue)]
        elif query_type == "random_word":
            r = RandomWords()
            return [self.get_subreddit_url(subreddit=sr, query=r.get_random_word()) for sr in subr]
        elif query_type == "random_phrase":
            s = RandomSentence()
            return [self.get_subreddit_url(subreddit=sr, query=s.sentence()) for sr in subr]
        else:
            return []

    def __call__(self, reddit_out_file: os.PathLike) -> None:
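        """Repeatedly fetch batches of image entries and append the new rows to the CSV at reddit_out_file."""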
        dfname = reddit_out_file
        otime = 0

        tarr = []  # elapsed time per iteration (rolling window)
        karr = []  # new rows written per iteration (rolling window)

        total_updates = 0

        with tqdm(total=10000) as pbar:
            for _ in range(10000):
                # Mix plain listing fetches with search-query fetches.
                if random.random() > 0.6:
                    res = self.fetch_multiple_subreddit_image_entries(self.get_random_subreddit_urls(num_urls=100))
                else:
                    res = self.fetch_multiple_subreddit_image_entries(
                        self.get_random_subreddit_query_urls(num_urls=5, query_type="random_phrase"))

                if res:
                    if not Path(dfname).exists():
                        pd.DataFrame(res).to_csv(dfname, index=False)
                        karr.append(len(res))
                    else:
                        df = pd.read_csv(dfname)
                        keys = set(df["image_id"])
                        cres = [x for x in res if x["image_id"] not in keys]

                        if cres:
                            pd.DataFrame(cres).to_csv(dfname, mode="a", header=False, index=False)
                            karr.append(len(cres))
                        else:
                            karr.append(0)
                else:
                    karr.append(0)

                ntime = pbar.format_dict['elapsed']
                N = len(pd.read_csv(dfname)) if Path(dfname).exists() else 0
                tarr.append(ntime - otime)
                otime = ntime
                tarr = tarr[-25:]
                karr = karr[-25:]
                rate = sum(karr) / max(sum(tarr), 1e-9)
                pbar.update(1)
                total_updates += karr[-1]
                pbar.set_description_str(f"count:{N}, fetch rate:{rate:.3f}, last_update:{karr[-1]}, total_updates:{total_updates}")
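

# Minimal usage sketch (illustrative, not part of the original module): runs the scraper end to end,
# assuming reddit.reddit_info provides the subreddit/sort lists imported above. The output path
# below is a hypothetical example.
if __name__ == "__main__":
    processor = RedditProcessor()
    processor(Path("reddit_images.csv"))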