import hashlib
import json
import re
from urllib.parse import urlparse

from bs4 import BeautifulSoup

from downloader_extractor_utils import *
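
# NOTE (assumption): downloader_extractor_utils is expected to provide
# domain_settings_dict, a mapping from (sub)domain to a settings object
# exposing an `encoding` attribute and `headline_extractor`,
# `abstract_extractor`, `document_extractor`, and `date_extractor`
# callables, as used in Extractor._parse_entry below.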
URL = 'url'
HEADLINE = 'headline'
ABSTRACT = 'abstract'
TEXT = 'text'
SECTION = 'section'
SUBDOMAIN = 'subdomain'
FILENAME = 'filename'
OFFSET = 'offset'
LENGTH = 'length'
PUBLISHED = 'published'
DATASET = 'dataset'

class Extractor(object):
    @staticmethod
    def extract_document(input_document):
        """Parse one input record and stamp it with a content checksum."""
        parsed_content = Extractor._parse(input_document)
        Extractor._add_checksum(parsed_content)
        return parsed_content
    @staticmethod
    def _parse(entry_dict):
        parsed_document = Extractor._parse_entry(entry_dict)
        # Normalize the three text fields: strip carriage returns and
        # non-breaking spaces, drop the "[celá zpráva]" ("full story")
        # link text, collapse whitespace, and unify quotation marks.
        for section_key in [HEADLINE, ABSTRACT, TEXT]:
            # Fails early (UnicodeEncodeError) if the text cannot be encoded as UTF-8.
            parsed_document[section_key].encode()
            parsed_document[section_key] = parsed_document[section_key].replace('\r', '').replace('\xa0', ' ')
            parsed_document[section_key] = re.sub(r'\[celá zpráva]', ' ', parsed_document[section_key], flags=re.I)
            if section_key in [HEADLINE, ABSTRACT]:
                # Headlines and abstracts must stay single-line.
                parsed_document[section_key] = parsed_document[section_key].replace('\n', ' ')
            parsed_document[section_key] = re.sub(r'[ \t\xA0\u2028]+', ' ', parsed_document[section_key].strip())
            parsed_document[section_key] = re.sub(r'[ ]*\n[ ]*', '\n', parsed_document[section_key])
            parsed_document[section_key] = re.sub(r"['`‚‘’]{1,2}", '"', parsed_document[section_key])
            parsed_document[section_key] = re.sub(r"[„“]", '"', parsed_document[section_key])
        return parsed_document

    @staticmethod
    def _parse_entry(contents):
        """Parse one CommonCrawl JSON record.

        Returns a dict with the extracted headline, abstract, text,
        section, subdomain (e.g., domaci.novinky.cz), and publication
        date, plus the provenance fields (url, filename, offset,
        length, dataset) copied over from the input record.
        """
        url = contents['url']
        parse = urlparse(url)
        # E.g., for https://domaci.novinky.cz/... the domain is
        # 'novinky.cz' and the subdomain is 'domaci.novinky.cz'.
        domain = '.'.join(parse.netloc.rsplit('.', maxsplit=2)[-2:])
        subdomain = parse.netloc.replace('www.', '')
        if domain == subdomain:
            # No subdomain: use the first path component as the section.
            section = (parse.path.split('/') + [''])[1]
            if not section.isalnum():
                section = subdomain
        else:
            section = subdomain
        if 'blog' in section:
            section = 'blogs'

        # Subdomain-specific settings take precedence over domain-wide ones.
        domain_settings = domain_settings_dict.get(subdomain, domain_settings_dict.get(domain))

        # The record's 'content' field holds bytes that were read as latin-1;
        # re-encode and decode as UTF-8, falling back to the domain-specific
        # encoding.
        try:
            warc = contents['content'].encode('latin-1').decode('utf-8')
        except UnicodeDecodeError:
            warc = contents['content'].encode('latin-1').decode(domain_settings.encoding)
        # Drop the WARC and HTTP header blocks; keep only the HTML body.
        html = warc.split('\r\n\r\n', maxsplit=2)[-1].replace('\r', '')
        soup = BeautifulSoup(html, 'html.parser')

        # Preserve line breaks: turn <br> tags into newlines before text extraction.
        for br in soup('br'):
            br.replace_with('\n')

        # Delegate field extraction to the domain-specific callables.
        headline_text = domain_settings.headline_extractor(soup)
        abstract_text = domain_settings.abstract_extractor(soup)
        document_text = domain_settings.document_extractor(soup, domain)

        published = domain_settings.date_extractor(soup)
        if published is None:
            published = ""

        return {URL: url,
                HEADLINE: headline_text,
                ABSTRACT: abstract_text,
                TEXT: document_text,
                SECTION: section,
                SUBDOMAIN: subdomain,
                FILENAME: contents[FILENAME],
                OFFSET: contents[OFFSET],
                LENGTH: contents[LENGTH],
                PUBLISHED: published,
                DATASET: contents[DATASET]}

    @staticmethod
    def _add_checksum(json_data):
        # Checksum the content fields only; the provenance fields
        # (filename/offset/length) are excluded.
        json_data_for_checksum = {}
        for field in [HEADLINE, ABSTRACT, TEXT, SECTION, SUBDOMAIN, PUBLISHED, URL]:
            json_data_for_checksum[field] = json_data[field]

        # Canonical serialization (ASCII-only, sorted keys, no extra
        # whitespace) keeps the checksum stable.
        string_for_checksum = json.dumps(json_data_for_checksum,
                                         ensure_ascii=True,
                                         sort_keys=True,
                                         indent=None,
                                         separators=(",", ":"))

        json_data['md5'] = hashlib.md5(string_for_checksum.encode('utf-8')).hexdigest()

        return json_data
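

# A minimal driver sketch (not part of the original module): it assumes
# input records arrive as JSON lines on stdin carrying the fields that
# _parse_entry reads ('url', 'content', 'filename', 'offset', 'length',
# 'dataset') and writes one extracted JSON document per line.
if __name__ == '__main__':
    import sys

    for line in sys.stdin:
        record = json.loads(line)
        document = Extractor.extract_document(record)
        print(json.dumps(document, ensure_ascii=False))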