| import time |
| import requests |
| import datasets |
| import pandas as pd |
| from tqdm.auto import tqdm |
| from bs4 import BeautifulSoup |
|
|
|
|
| BASE_URL = "https://learning.aljazeera.net" |
|
|
|
|
def extract_instruction(page_route, index):
    """Fetch one Q&A page from the site and return it as an instruction record.

    Parameters
    ----------
    page_route : str
        Path relative to BASE_URL of the question page (e.g. "/ar/...").
    index : str
        Identifier stored verbatim in the record's "index" field.

    Returns
    -------
    dict
        Keys: "instruction", "output", "index", "instruction_en", "output_en".
        The English fields are left empty for later translation.

    Raises
    ------
    requests.HTTPError
        If the page responds with a non-2xx status.
    AttributeError
        If the expected question/answer divs are missing from the page
        (``find`` returns None). Callers wrap this in a broad try/except.
    """
    # timeout keeps the scraper from hanging forever on a stalled connection
    response = requests.get(f"{BASE_URL}{page_route}", timeout=30)
    response.raise_for_status()  # fail fast instead of parsing an error page
    soup = BeautifulSoup(response.text, "html.parser")
    # Strip the Arabic "Question:" / "Answer:" labels from the scraped text.
    question = soup.find("div", {"class": "ask-question"}).text.replace("السؤال :", "")
    answer = soup.find("div", {"class": "ask-answer"}).text.replace("الجواب :", "")
    return {
        "instruction": question.strip(),
        "output": answer.strip(),
        "index": index,
        "instruction_en": "",
        "output_en": "",
    }
|
|
|
|
# Sequential record index; starts at 9229 to continue an existing numbering
# scheme — presumably records 0-9228 were collected earlier (TODO confirm).
index = 9229
instructions = []
# The "Ask the Teacher" section has 93 listing pages, paginated via ?page=N.
# NOTE: original code passed `leave=93`; tqdm's `leave` is a boolean flag.
for page in tqdm(range(93), leave=True):
    time.sleep(2)  # be polite to the server between listing-page requests
    # NOTE(review): the original passed BASE_URL as a stray second positional
    # argument — requests' `params` — which corrupts the query string. Removed.
    listing = requests.get(f"{BASE_URL}/ar/asktheteacher?page={page}", timeout=30)
    soup = BeautifulSoup(listing.text, "html.parser")
    # Each answer link on the listing page is an <a> whose text is "الجواب".
    for href_tag in soup.find_all("a", string="الجواب"):
        href_link = href_tag.get("href")
        try:
            instructions.append(
                extract_instruction(page_route=href_link, index=str(index))
            )
            index += 1
        except Exception as e:
            # Best-effort scrape: report the failing route (and why) and keep going.
            print("cannot read from page route:", href_link, "-", e)
|
|
# Wrap the scraped records in a Hugging Face Dataset and upload it to the Hub.
frame = pd.DataFrame(data=instructions)
dataset = datasets.Dataset.from_pandas(frame)

dataset.push_to_hub("AskTheTeacherDataset")