| |
| """ |
| Collect relevant metadata from local UD directories: |
| - extracting a markdown Summary section and machine-readable metadata |
| from README.{md,txt} |
| - using the UD directory name for collecting metadata from the |
| codes_and_flags.yaml file |
| - collecting {dev,train,test}.conllu files. |
| """ |
|
|
| import json |
| import os |
| import xml.etree.ElementTree as ET |
| import argparse |
| import logging |
| import re |
| from collections import defaultdict |
| from pathlib import Path |
|
|
| import yaml |
| from dotenv import load_dotenv |
|
|
|
|
# Directory containing this script; all relative paths are resolved from here.
TOOLS_DIR = Path(__file__).resolve().parent

# Pull configuration from an adjacent .env file, then read the UD version,
# falling back to 2.17 when the variable is absent.
load_dotenv(TOOLS_DIR / ".env")
UD_VER = os.environ.get("UD_VER", "2.17")
|
|
| |
# Command-line interface. --ud-ver defaults to the environment-provided
# UD_VER so the env value acts as a fallback, not a hard-coded choice.
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
    '-o', '--override', action='store_true',
    help='override output file if it already exists',
)
parser.add_argument(
    '--ud-ver', default=UD_VER,
    help='UD version to process (default: UD_VER from env or 2.17)',
)
parser.add_argument(
    '-v', '--verbose', action='count', default=0,
    help='increase verbosity level',
)
args = parser.parse_args()
# The value given on the command line wins over the environment default.
UD_VER = args.ud_ver
|
|
| |
# Verbosity: WARNING by default; each -v lowers the threshold by one logging
# level (10 units), bottoming out at DEBUG.
log_level = max(logging.DEBUG, logging.WARNING - args.verbose * 10)
logging.basicConfig(
    level=log_level,
    format='%(asctime)s [%(levelname)s] %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    force=True,
)
|
|
# UD release (major, minor) from which codes_and_flags.yaml is expected to
# store genus in a dedicated "genus" field instead of inside "family".
GENUS_SEPARATED_FROM_UD_VERSION = (2, 15)
# Only look for the Summary heading within the first N lines of a README.
SUMMARY_HEADING_SCAN_LIMIT = 120
# H1/H2 markdown heading whose text is exactly "summary" (case-insensitive).
SUMMARY_HEADING_RE = re.compile(r"^#{1,2}\s+summary\s*$", re.IGNORECASE)
# Any markdown heading (H1-H6) followed by non-space content.
MARKDOWN_HEADING_RE = re.compile(r"^#{1,6}\s+\S")
|
|
|
|
| def parse_ud_version(version: str) -> tuple[int, ...] | None: |
| """ |
| Parse UD version strings like "2.14" or "2.15.1" into integer tuples. |
| Returns None if parsing fails (e.g. "latest"). |
| """ |
| try: |
| return tuple(int(part) for part in version.split(".")) |
| except ValueError: |
| return None |
|
|
|
|
def normalize_codes_and_flags_entry(entry: dict, ud_version: str) -> dict:
    """
    Normalize family/genus fields from codes_and_flags YAML across UD versions.

    Before UD 2.15, genus was often encoded in "family" as "Family, Genus".
    From UD 2.15 onward, genus is expected as a dedicated "genus" field.
    The input dict is never mutated; a shallow copy is returned.
    """
    result = dict(entry)

    # Unparseable or modern versions already use the separated layout.
    parsed = parse_ud_version(ud_version)
    if parsed is None or parsed >= GENUS_SEPARATED_FROM_UD_VERSION:
        return result

    family = result.get("family")
    # Only split when family is a comma-joined string and genus isn't set yet.
    if not isinstance(family, str) or result.get("genus") or "," not in family:
        return result

    family_part, _, genus_part = family.partition(",")
    result["family"] = family_part.strip()
    genus_part = genus_part.strip()
    if genus_part:
        result["genus"] = genus_part
    return result
|
|
|
|
def find_summary_bounds(lines: list[str]) -> tuple[int, int] | None:
    """
    Find the markdown Summary section bounds.

    Looks for an H1/H2 "Summary" heading within the first
    SUMMARY_HEADING_SCAN_LIMIT lines; the section runs from the line after
    the heading up to (but excluding) the next markdown heading, or to the
    end of the file. Returns (start_index, end_index_exclusive), or None
    when no Summary heading is found.
    """
    heading_idx = next(
        (
            idx
            for idx, line in enumerate(lines[:SUMMARY_HEADING_SCAN_LIMIT])
            if SUMMARY_HEADING_RE.match(line.strip())
        ),
        None,
    )
    if heading_idx is None:
        return None

    start = heading_idx + 1
    # The section ends at the next heading of any level, if there is one.
    for idx in range(start, len(lines)):
        if MARKDOWN_HEADING_RE.match(lines[idx].strip()):
            return (start, idx)
    return (start, len(lines))
|
|
|
|
def extract_metadata(file_path) -> dict:
    """
    Extract the Summary section and "Key: value" metadata fields from a
    UD treebank README file.

    Args:
        file_path (str): The path to the README.{md,txt} file.

    Returns:
        dict: The extracted metadata; fields absent from the README stay None.
    """
    metadata = {
        "summary": None,
        "license": None,
        "genre": None,
        "lemmas": None,
        "upos": None,
        "xpos": None,
        "language": None,
        "flag": None,
        "lcode": None,
        "iso3": None,
        "family": None,
        "genus": None,
        "blocked": None,
    }

    # READMEs are UTF-8 in practice; be explicit so the platform default
    # encoding cannot break parsing.
    with open(file_path, 'r', encoding='utf-8') as file:
        lines = [line.strip() for line in file]

    # If a Summary section exists, join its lines into a single string and
    # start the key/value scan after it so summary prose containing ":" is
    # not misread as metadata.
    summary_bounds = find_summary_bounds(lines)
    metadata_scan_start = 0
    if summary_bounds is not None:
        summary_start, summary_end = summary_bounds
        metadata_scan_start = summary_end
        summary = ' '.join(lines[summary_start:summary_end]).strip()
        if summary:
            metadata["summary"] = summary

    # Machine-readable metadata lines look like "License: CC BY-SA 4.0".
    for line in lines[metadata_scan_start:]:
        if ":" not in line:
            continue
        key, val = line.split(":", 1)
        key = key.lower()
        if key in metadata:
            # "genre" holds a space-separated list; all other fields are
            # kept as plain stripped strings.
            metadata[key] = val.strip().split(" ") if key == "genre" else val.strip()
    return metadata
|
|
|
|
def traverse_directory(directory):
    """
    Traverse the directory's first-level subdirectories and collect
    per-treebank metadata: .conllu split files with byte sizes, README
    summary/metadata, stats.xml counts, and language/flag info from the
    codes_and_flags YAML file.

    Treebanks missing a summary, a license, or any .conllu files are
    dropped from the result with a warning.

    Args:
        directory (str): The path to the directory containing UD_* subdirs.

    Returns:
        dict: Mapping from "<lcode>_<corpus>" name to collected metadata
        (with the original directory name stored under "dirname").
    """
    # Nested defaultdict lets us assign arbitrary metadata keys per item
    # without initializing each entry first.
    results = defaultdict(lambda: defaultdict(dict))

    # Version-specific language table: maps language name to lcode/iso3/
    # family/flag etc. (structure per the codes_and_flags YAML file).
    codes_and_flags_path = TOOLS_DIR / "etc" / f"codes_and_flags-{UD_VER}.yaml"
    with codes_and_flags_path.open('r') as file:
        codes_and_flags = yaml.safe_load(file)
    logging.debug(codes_and_flags)

    for item in os.listdir(directory):
        # Skip hidden entries such as .git.
        if item.startswith("."):
            continue

        if os.path.isdir(os.path.join(directory, item)):
            dir_path = os.path.join(directory, item)
            logging.debug(dir_path)

            # A per-release tag file marks a checked-out tagged release;
            # its presence changes the recorded source path layout below.
            tag_fn = os.path.join(dir_path, f".tag-r{UD_VER}")
            has_version_tag = Path(tag_fn).exists()
            if not has_version_tag:
                logging.debug(f"No tag file found (expected for snapshots): {tag_fn}")

            # Pre-create all split buckets; empty ones are pruned later.
            results[item]["splits"] = {
                "train": {"files": [], "num_bytes": 0},
                "dev": {"files": [], "num_bytes": 0},
                "test": {"files": [], "num_bytes": 0},
                "unknown": {"files": [], "num_bytes": 0}
            }
            for file in os.listdir(dir_path):
                if file.endswith(".conllu"):
                    file_path = os.path.join(dir_path, file)
                    # Tagged releases live under an r<version> subpath.
                    if has_version_tag:
                        source_path = os.path.join(item, f"r{UD_VER}", file)
                    else:
                        source_path = os.path.join(item, file)
                    logging.debug(file_path)
                    # Classify by filename substring; guards are checked in
                    # order, so a name containing both "dev" and "test"
                    # lands in "dev".
                    match file:
                        case x if "dev" in file:
                            subset = "dev"
                        case x if "test" in file:
                            subset = "test"
                        case x if "train" in file:
                            subset = "train"
                        case _:
                            subset = "unknown"
                    results[item]["splits"][subset]["files"].append(source_path)

                    # Accumulate on-disk size per split.
                    sum_bytes = os.stat(file_path).st_size
                    results[item]["splits"][subset]["num_bytes"] += sum_bytes

                elif file.startswith("README") and (file.endswith(
                        tuple(["md", "txt"]))):
                    # Merge summary + "Key: value" fields from the README.
                    results[item].update(
                        extract_metadata(os.path.join(dir_path, file)))

                elif file == "stats.xml":
                    # Official UD stats file: read per-split counts from
                    # the <size> element, e.g. <size><train><tokens>.
                    tree = ET.parse(os.path.join(dir_path, file))
                    root = tree.getroot()
                    size_node = root.find('.//size')
                    if size_node is None:
                        continue

                    for child_node_name in ["train", "dev", "test"]:
                        child_node = size_node.find(child_node_name)
                        if child_node is None:
                            continue

                        for child_child_node_name in ["sentences", "tokens", "words", "fused"]:
                            value_node = child_node.find(child_child_node_name)
                            if value_node is None:
                                continue
                            value = value_node.text
                            # Only record positive counts; note the value is
                            # stored as the original string, not an int.
                            if value and int(value) > 0:
                                results[item]["splits"][child_node_name][f"num_{child_child_node_name}"] = value

            # Directory names follow "UD_<Language>-<Corpus>"; strip the
            # "UD_" prefix, split off the corpus, and restore spaces.
            language = item[3:].rsplit("-", 1)[0].replace("_", " ")
            results[item]["language"] = language
            # NOTE(review): raises KeyError if the language is missing from
            # the codes_and_flags table — confirm that is intended (fail
            # fast on an incomplete table).
            normalized_codes_entry = normalize_codes_and_flags_entry(
                codes_and_flags[language], UD_VER
            )
            results[item].update(normalized_codes_entry)

            # Canonical dataset name: "<lcode>_<corpus-lowercased>".
            corpus_name = item[3:].rsplit("-", 1)[1].lower()
            # NOTE(review): nested double quotes inside an f-string require
            # Python 3.12+ (match/case above already requires 3.10+).
            name = f"{results[item]["lcode"]}_{corpus_name}"
            results[item]["name"] = name

            # Drop split buckets that collected no files (iterate over a
            # snapshot since we delete while looping).
            for fileset_k,fileset_v in list(results[item]["splits"].items()):
                if not fileset_v["files"]:
                    del results[item]["splits"][fileset_k]

            # An absent README leaves "summary"/"license" as empty nested
            # defaultdicts, which are falsy — so those items are dropped too.
            missing_summary = not results[item]["summary"]
            missing_files = not any(value["files"] for value in results[item]["splits"].values())
            missing_license = not results[item]["license"]

            if missing_summary or missing_files or missing_license:
                if missing_summary:
                    logging.warning("ITEM DELETED - no summary: %s", item)
                if missing_files:
                    logging.debug(results[item]["splits"])
                    logging.warning("ITEM DELETED - no files: %s", item)
                if missing_license:
                    logging.warning("ITEM DELETED - no license: %s", item)
                del results[item]

    # Re-key results by canonical name, keeping the original directory name
    # under "dirname" (iterate a key snapshot: the dict is mutated inside).
    for key in list(results.keys()):
        name = results[key]["name"]
        del results[key]["name"]
        results[key]["dirname"] = key
        results[name] = results.pop(key)

    return results
|
|
if __name__ == '__main__':
    directory = TOOLS_DIR / 'UD_repos'
    results = traverse_directory(directory)

    # Load the optional blocked-treebanks list. A missing file or an empty
    # YAML document means "nothing is blocked"; entries whose value is None
    # are treated as not blocked.
    blocked_treebanks = {}
    blocked_treebanks_file = TOOLS_DIR / 'blocked_treebanks.yaml'
    if blocked_treebanks_file.exists():
        with open(blocked_treebanks_file, 'r', encoding='utf-8') as fh:
            blocked_data = yaml.safe_load(fh)
        if blocked_data:
            blocked_treebanks = {k: v for k, v in blocked_data.items() if v is not None}
    else:
        logging.warning(f"Blocked treebanks file not found: {blocked_treebanks_file}")

    # Flag every treebank exactly once (the original repeated this loop in
    # three separate branches).
    for name in results:
        if name in blocked_treebanks:
            results[name]["blocked"] = True
            # NOTE(review): assumes every blocked entry carries a 'reason'
            # key — confirm against blocked_treebanks.yaml's schema.
            logging.info(f"Marked {name} as blocked: {blocked_treebanks[name]['reason']}")
        else:
            results[name]["blocked"] = False

    for name, metadata in results.items():
        logging.debug("Directory: %s", metadata["dirname"])

    # Write the collected metadata unless a non-empty output already exists
    # and --override was not given. Open as UTF-8 explicitly: the dump uses
    # ensure_ascii=False, which would raise UnicodeEncodeError on a
    # non-UTF-8 platform default encoding.
    output_path = TOOLS_DIR / f"metadata-{UD_VER}.json"
    should_write = (args.override
                    or not output_path.exists()
                    or output_path.stat().st_size == 0)
    if should_write:
        with output_path.open('w', encoding='utf-8') as fh:
            json.dump(results, fh, ensure_ascii=False, sort_keys=True,
                      indent=4, separators=(',', ': '))
        print(f"{output_path.name} written")
    else:
        logging.info("Output %s already exists: Not overriding.", output_path.name)
|