# /// script
# requires-python = ">=3.9"
# dependencies = [
#     "requests",
#     "datasets",
#     "huggingface_hub",
# ]
# ///

| """ |
| Daily sync for huggingface/trending-papers-x dataset. |
| Indexes new papers and updates GitHub/project URLs via HF Papers API. |
| |
| Run locally: uv run daily_papers_sync.py |
| Run as HF Job: hf jobs uv run daily_papers_sync.py --secrets HF_TOKEN |
| """ |

from __future__ import annotations

import os
import re
import time
from datetime import datetime, timedelta, timezone
from typing import Any, Optional
from urllib.parse import urlparse

import requests
from datasets import load_dataset

REPO_ID = "huggingface/trending-papers-x"
API_BASE = "https://huggingface.co/api"
HARD_LIMIT = 150
HOURS_LOOKBACK = 24

# Matches arXiv abs/pdf URLs and captures the trailing ID portion.
_ARXIV_URL_RE = re.compile(r"https?://(?:www\.)?arxiv\.org/(?:abs|pdf)/(?P<id>[^?#]+)", re.I)
# New-style arXiv IDs: YYMM followed by a 4-5 digit sequence number.
_ARXIV_NEW_RE = re.compile(r"^\d{4}\.\d{4,5}$")


def normalize_arxiv_id(value: Any) -> Optional[str]:
    """Extract and validate arXiv ID from various formats."""
    if not value:
        return None
    s = str(value).strip()

    # Pull the ID out of a full arxiv.org URL if one was given.
    if m := _ARXIV_URL_RE.search(s):
        s = m.group("id")

    s = s.strip().rstrip("/")
    if s.lower().endswith(".pdf"):
        s = s[:-4]
    if s.lower().startswith("arxiv:"):
        s = s[6:]

    # Drop a trailing version suffix such as "v2".
    s = re.sub(r"v\d+$", "", s)

    # Require the new-style YYMM.NNNNN format.
    if not _ARXIV_NEW_RE.fullmatch(s):
        return None

    # Sanity-check the month component of the YYMM prefix.
    month = int(s[2:4])
    return s if 1 <= month <= 12 else None
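
# Illustrative behavior of normalize_arxiv_id (a quick sketch, not an
# exhaustive spec; the IDs are hypothetical):
#   normalize_arxiv_id("https://arxiv.org/abs/2403.01234v2")   -> "2403.01234"
#   normalize_arxiv_id("arXiv:2403.01234")                     -> "2403.01234"
#   normalize_arxiv_id("https://arxiv.org/pdf/2403.01234.pdf") -> "2403.01234"
#   normalize_arxiv_id("hep-th/9901001")                       -> None (old-style IDs rejected)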


def normalize_github_repo(value: Any) -> Optional[str]:
    """Extract and normalize GitHub repo URL."""
    if not value:
        return None
    s = str(value).strip()

    # Convert SSH remotes and bare hosts to https:// form.
    if s.startswith("git@github.com:"):
        s = f"https://github.com/{s[15:]}"
    elif s.startswith("github.com/"):
        s = f"https://{s}"

    p = urlparse(s)
    if p.scheme not in ("http", "https"):
        return None

    host = (p.netloc or "").lower().removeprefix("www.")
    if host != "github.com":
        return None

    # Keep only the owner/repo part of the path.
    parts = [x for x in p.path.split("/") if x]
    if len(parts) < 2:
        return None

    owner, repo = parts[0], parts[1].removesuffix(".git")
    return f"https://github.com/{owner}/{repo}"


def normalize_url(value: Any) -> Optional[str]:
    """Validate that a value is an http(s) URL with a host."""
    if not value:
        return None
    s = str(value).strip()
    p = urlparse(s)
    return s if p.scheme in ("http", "https") and p.netloc else None
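
# normalize_url only checks for a scheme and host, e.g. (hypothetical URLs):
#   normalize_url("https://example.com/project") -> "https://example.com/project"
#   normalize_url("example.com/project")         -> None (missing scheme)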


def parse_date(value: Any) -> Optional[datetime]:
    """Parse date string into datetime."""
    if isinstance(value, datetime):
        return value.replace(tzinfo=timezone.utc) if value.tzinfo is None else value
    if not value:
        return None

    # Try a few common ISO-like formats; naive values are treated as UTC.
    for fmt in ["%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]:
        try:
            dt = datetime.strptime(str(value).strip(), fmt)
            return dt.replace(tzinfo=timezone.utc)
        except ValueError:
            continue
    return None
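
# Example: parse_date("2024-03-01T12:00:00Z") yields a UTC-aware datetime;
# unrecognized formats such as "03/01/2024" fall through and return None.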


def get_token() -> str:
    """Get HF token from environment or huggingface-cli."""
    if token := os.environ.get("HF_TOKEN", "").strip():
        return token
    try:
        # Fall back to the token stored by `huggingface-cli login`.
        from huggingface_hub import HfFolder
        return (HfFolder.get_token() or "").strip()
    except Exception:
        return ""


def get_paper(session: requests.Session, arxiv_id: str) -> Optional[dict]:
    """Fetch paper from API, returning None if not found."""
    try:
        r = session.get(f"{API_BASE}/papers/{arxiv_id}", timeout=30)
        return r.json() if r.status_code == 200 else None
    except Exception:
        return None
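
# For reference, the request issued above is simply (endpoint taken from the
# code; the paper ID is hypothetical):
#   GET https://huggingface.co/api/papers/2403.01234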


def index_paper(session: requests.Session, arxiv_id: str) -> bool:
    """Index a paper by arXiv ID. Returns True on success."""
    try:
        r = session.post(f"{API_BASE}/papers/index", json={"arxivId": arxiv_id}, timeout=30)
        return r.status_code == 200
    except Exception:
        return False
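
# The corresponding raw request (payload shape taken from the code above;
# the ID is hypothetical):
#   POST https://huggingface.co/api/papers/index  {"arxivId": "2403.01234"}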


def update_paper_links(
    session: requests.Session,
    arxiv_id: str,
    github_repo: Optional[str] = None,
    project_page: Optional[str] = None,
) -> bool:
    """Update GitHub repo and/or project page for a paper."""
    payload = {}
    if github_repo:
        payload["githubRepo"] = github_repo
    if project_page:
        payload["projectPage"] = project_page

    # Nothing to update.
    if not payload:
        return False

    try:
        r = session.post(f"{API_BASE}/papers/{arxiv_id}/links", json=payload, timeout=30)
        return r.status_code == 200
    except Exception:
        return False
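
# Example payload sent by update_paper_links (values hypothetical; field names
# as used above):
#   {"githubRepo": "https://github.com/owner/repo",
#    "projectPage": "https://example.com/project"}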


def main() -> None:
    token = get_token()
    if not token:
        print("ERROR: HF token not found. Set HF_TOKEN or run `huggingface-cli login`.")
        raise SystemExit(1)

    session = requests.Session()
    session.headers.update({
        "Content-Type": "application/json",
        "Authorization": f"Bearer {token}",
    })

    # Only papers newer than the lookback window are processed.
    cutoff_time = datetime.now(timezone.utc) - timedelta(hours=HOURS_LOOKBACK)

    print(f"Dataset: {REPO_ID}")
    print(f"Lookback: {HOURS_LOOKBACK}h (since {cutoff_time.strftime('%Y-%m-%d %H:%M UTC')})")
    print(f"Limit: {HARD_LIMIT} papers")
    print("-" * 50)

    # Stream rows instead of downloading the whole dataset up front.
    dataset = load_dataset(REPO_ID, split="train", streaming=True)

    stats = {"indexed": 0, "github": 0, "project": 0, "not_found": 0, "skipped": 0}
    processed = 0

    for row in dataset:
        if processed >= HARD_LIMIT:
            break

        arxiv_id = normalize_arxiv_id(row.get("arxiv_id") or row.get("paper_id"))
        if not arxiv_id:
            continue

        # Skip rows older than the lookback window.
        date_str = row.get("date") or row.get("published_at") or row.get("created_at")
        if (paper_date := parse_date(date_str)) and paper_date < cutoff_time:
            stats["skipped"] += 1
            continue

        # Normalize whichever link columns the row provides.
        github_repo = normalize_github_repo(row.get("github") or row.get("github_url"))
        project_page = normalize_url(row.get("project_page_url") or row.get("project_page"))

        # Nothing to sync for this paper.
        if not github_repo and not project_page:
            continue

        processed += 1

        # Check whether the paper is already indexed on the Hub.
        paper = get_paper(session, arxiv_id)
        just_indexed = False

        # Index the paper first if the API does not know it yet.
        if paper is None:
            if index_paper(session, arxiv_id):
                stats["indexed"] += 1
                just_indexed = True
                print(f"INDEXED: {arxiv_id}")
                # Give the freshly indexed paper time to propagate.
                time.sleep(30)
            else:
                stats["not_found"] += 1
                print(f"SKIP: {arxiv_id} - could not index")
                continue

        # A just-indexed paper cannot have links yet; otherwise keep existing ones.
        has_github = False if just_indexed else bool(paper.get("githubRepo"))
        has_project = False if just_indexed else bool(paper.get("projectPage"))

        github_to_set = github_repo if github_repo and not has_github else None
        project_to_set = project_page if project_page and not has_project else None

        if not github_to_set and not project_to_set:
            print(f"SKIP: {arxiv_id} - already has links")
            continue

        # Push whichever links are missing.
        if update_paper_links(session, arxiv_id, github_to_set, project_to_set):
            if github_to_set:
                stats["github"] += 1
                print(f"SET GITHUB: {arxiv_id} -> {github_to_set}")
            if project_to_set:
                stats["project"] += 1
                print(f"SET PROJECT: {arxiv_id} -> {project_to_set}")
        else:
            print(f"ERROR: {arxiv_id} - failed to update links")

    print("-" * 50)
    print(f"Processed: {processed}")
    print(f"Indexed: {stats['indexed']}")
    print(f"GitHub added: {stats['github']}")
    print(f"Project added: {stats['project']}")
    print(f"Not found: {stats['not_found']}")
    print(f"Skipped (old): {stats['skipped']}")


if __name__ == "__main__":
    main()