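"""Collect public metadata from the Hugging Face Hub API and publish it as
parquet files to a Hub dataset repo.

Raw API pages are cached as JSONL under .hf_cache/ so the parquet files can
be rebuilt offline with --recreate.
"""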

import asyncio
import json
import os
import time

import aiohttp
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import requests.utils
from dotenv import load_dotenv
from huggingface_hub import HfApi
from tenacity import retry, stop_after_attempt, wait_exponential

# Load environment variables (e.g. HF_TOKEN) from a local .env file.
load_dotenv()

# Local cache directory for raw JSONL pages and generated parquet files.
CACHE_DIR = ".hf_cache"
os.makedirs(CACHE_DIR, exist_ok=True)

# Authenticate against the Hub; the target dataset repo lives under the
# logged-in user's namespace.
HF_TOKEN = os.environ.get("HF_TOKEN")
api = HfApi(token=HF_TOKEN)
USER_ID = api.whoami()["name"]
REPO_ID = f"{USER_ID}/hub-stats"

print("🚀 Hugging Face Hub Stats Collector")
print(f"📊 Dataset will be uploaded to: {REPO_ID}")
print(f"👤 User: {USER_ID}")
print("-" * 50)

# Per-endpoint request configuration: page size, extra query parameters, and
# an optional non-default base URL.
ENDPOINT_CONFIGS = {
    "models": {
        "limit": 1000,
        "params": {
            "full": "true",
            "config": "true",
            "expand[]": [
                "gguf",
                "downloadsAllTime",
                "transformersInfo",
                "cardData",
                "safetensors",
                "baseModels",
                "author",
                "likes",
                "inferenceProviderMapping",
                "downloads",
                "siblings",
                "tags",
                "pipeline_tag",
                "lastModified",
                "createdAt",
                "config",
                "library_name",
            ],
        },
    },
    "datasets": {
        "limit": 1000,
        "params": {
            "full": "true",
            "expand[]": [
                "author",
                "cardData",
                "citation",
                "createdAt",
                "disabled",
                "description",
                "downloads",
                "downloadsAllTime",
                "gated",
                "lastModified",
                "likes",
                "paperswithcode_id",
                "private",
                "siblings",
                "sha",
                "tags",
                "trendingScore",
            ],
        },
    },
    "spaces": {"limit": 1000, "params": {"full": "true"}},
    "posts": {"limit": 50, "params": {"skip": 0}},
    "daily_papers": {
        "limit": 50,
        "params": {},
        "base_url": "https://huggingface.co/api/daily_papers",
    },
    "arxiv_papers": {
        "limit": 100,
        "params": {},
        "base_url": "https://huggingface.co/api/papers",
    },
}
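
# For example, the "models" entry above produces paged requests of the form
# (illustrative; the expand[] list is encoded as repeated query parameters):
#   GET https://huggingface.co/api/models?limit=1000&full=true&config=true&expand[]=gguf&...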


def parse_link_header(link_header):
    """Return the URL tagged rel="next" in an HTTP Link header, if any."""
    if not link_header:
        return None
    links = requests.utils.parse_header_links(link_header)
    for link in links:
        if link.get("rel") == "next":
            return link.get("url")
    return None
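
# Illustrative example (hypothetical header value, not a captured response):
#   parse_link_header('<https://huggingface.co/api/models?cursor=abc>; rel="next"')
#   -> 'https://huggingface.co/api/models?cursor=abc'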


def to_json_string(x):
    """Serialize dicts/lists to JSON strings so parquet columns stay scalar."""
    if isinstance(x, (dict, list)):
        return json.dumps(x)
    if x is None or pd.isna(x):
        return None
    return str(x)
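
# For example: to_json_string({"likes": 3}) -> '{"likes": 3}',
# to_json_string(None) -> None, and to_json_string(42) -> '42'.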


def stringify_nested_columns(df):
    """JSON-encode every column that contains any dict or list values."""
    for col in df.columns:
        if df[col].map(lambda value: isinstance(value, (dict, list))).any():
            df[col] = df[col].apply(to_json_string)
    return df
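
# For example, a "siblings" column holding [{"rfilename": "config.json"}]
# becomes the JSON string '[{"rfilename": "config.json"}]', which parquet can
# store as plain text.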


def process_dataframe(df, endpoint):
    """Apply endpoint-specific flattening and type normalization."""
    if len(df) == 0:
        return df

    if endpoint == "posts":
        # Flatten the nested author object into top-level columns.
        if "author" in df.columns:
            author_df = pd.json_normalize(df["author"])
            author_cols = ["avatarUrl", "followerCount", "fullname", "name"]
            for col in author_cols:
                if col in author_df.columns:
                    df[col] = author_df[col]
            df = df.drop("author", axis=1)

        for ts_col in ["publishedAt", "updatedAt"]:
            if ts_col in df.columns:
                df[ts_col] = pd.to_datetime(df[ts_col], errors="coerce").dt.tz_localize(
                    None
                )

    elif endpoint == "daily_papers":
        # Flatten the nested paper object into paper_-prefixed columns.
        if "paper" in df.columns:
            paper_df = pd.json_normalize(df["paper"], errors="ignore").add_prefix(
                "paper_"
            )
            df = pd.concat([df.drop("paper", axis=1), paper_df], axis=1)

        for ts_col in ["publishedAt", "paper_publishedAt"]:
            if ts_col in df.columns:
                df[ts_col] = pd.to_datetime(df[ts_col], errors="coerce").dt.tz_localize(
                    None
                )

    elif endpoint == "arxiv_papers":
        for ts_col in ["publishedAt", "submittedOnDailyAt"]:
            if ts_col in df.columns:
                df[ts_col] = pd.to_datetime(df[ts_col], errors="coerce").dt.tz_localize(
                    None
                )

    else:
        for field in ["createdAt", "lastModified"]:
            if field in df.columns:
                df[field] = pd.to_datetime(df[field], errors="coerce").dt.tz_localize(
                    None
                )

    # "gated" mixes booleans and strings ("auto"/"manual"); normalize to str.
    if "gated" in df.columns:
        df["gated"] = df["gated"].astype(str)

    # Nested metadata columns are stored as JSON strings.
    for col in ["cardData", "config", "gguf"]:
        if col in df.columns:
            df[col] = df[col].apply(to_json_string)

    # arXiv paper records carry arbitrary nested fields; stringify them all.
    if endpoint == "arxiv_papers":
        df = stringify_nested_columns(df)

    return df


def save_parquet(df, output_file):
    """Write a DataFrame straight to parquet (simple helper, currently unused;
    jsonl_to_parquet writes row groups directly)."""
    df.to_parquet(output_file, index=False, engine="pyarrow")


@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=60))
async def fetch_data_page(session, url, params=None, headers=None):
    """GET one API page and return (json_body, Link header)."""
    async with session.get(url, params=params, headers=headers) as response:
        response.raise_for_status()
        return await response.json(), response.headers.get("Link")
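
# With this retry policy a failing request is attempted up to 5 times;
# wait_exponential(multiplier=1, min=4, max=60) sleeps roughly 4, 4, 8, and 16
# seconds between attempts, never more than 60.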


def jsonl_to_parquet(endpoint, jsonl_file, output_file):
    """Convert one endpoint's cached JSONL pages into a single parquet file.

    Returns the number of rows written (0 if no input data was found).
    """
    if not os.path.exists(jsonl_file):
        print(f"✗ {jsonl_file} not found")
        return 0

    # Each JSONL line holds one raw API page; process pages one at a time.
    all_dfs = []
    with open(jsonl_file, "r") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            data = json.loads(line)
            if endpoint == "posts":
                items = data.get("socialPosts", [])
            else:
                items = data

            if not items:
                continue

            df = pd.DataFrame(items)
            if df.empty:
                continue

            df = process_dataframe(df, endpoint)
            all_dfs.append(df)

    if not all_dfs:
        print(f" No data found for {endpoint}")
        return 0

    # Combine all pages into one frame.
    combined_df = pd.concat(all_dfs, ignore_index=True)
    total_rows = len(combined_df)

    # Write with bounded row groups so readers can stream the file.
    row_group_size = 50_000
    table = pa.Table.from_pandas(combined_df, preserve_index=False)
    with pq.ParquetWriter(output_file, table.schema) as writer:
        for i in range(0, total_rows, row_group_size):
            chunk = table.slice(i, min(row_group_size, total_rows - i))
            writer.write_table(chunk)

    return total_rows
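
# Illustrative standalone use (paths follow the cache layout above):
#   rows = jsonl_to_parquet("models", ".hf_cache/models_raw.jsonl",
#                           ".hf_cache/models.parquet")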


async def create_parquet_files(skip_upload=False):
    """Fetch every endpoint, caching raw pages to JSONL, then build (and
    optionally upload) one parquet file per endpoint."""
    start_time = time.time()
    endpoints = [
        "daily_papers",
        "arxiv_papers",
        "models",
        "spaces",
        "datasets",
        "posts",
    ]
    created_files = []
    jsonl_files = {}

    async with aiohttp.ClientSession() as session:
        for endpoint in endpoints:
            print(f"Fetching {endpoint}...")

            config = ENDPOINT_CONFIGS[endpoint]
            base_url = config.get("base_url", f"https://huggingface.co/api/{endpoint}")
            params = {"limit": config["limit"]}
            params.update(config["params"])

            headers = {"Accept": "application/json"}
            url = base_url
            page = 0

            # Truncate any previous raw cache for this endpoint.
            jsonl_file = os.path.join(CACHE_DIR, f"{endpoint}_raw.jsonl")
            with open(jsonl_file, "w") as f:
                pass

            while url:
                # The posts endpoint pages with skip/limit instead of Link headers.
                if endpoint == "posts":
                    params["skip"] = page * params["limit"]

                try:
                    data, link_header = await fetch_data_page(
                        session, url, params, headers
                    )

                    # Persist the raw page so parquet files can be rebuilt offline.
                    with open(jsonl_file, "a") as f:
                        f.write(json.dumps(data) + "\n")

                    if endpoint == "posts":
                        total_items = data.get("numTotalItems", 0)
                        items_on_page = len(data.get("socialPosts", []))
                        fetched = (page + 1) * params["limit"]
                        if fetched >= total_items or items_on_page == 0:
                            url = None
                        else:
                            url = base_url
                    else:
                        url = parse_link_header(link_header)
                        if url:
                            # The next-page URL already carries its query string.
                            params = {}

                    page += 1

                except Exception as e:
                    print(f"Error on page {page} for {endpoint}: {e}")
                    await asyncio.sleep(2)
                    if page > 0:
                        # Keep the pages fetched so far instead of failing outright.
                        url = None
                    else:
                        raise

            print(f" Raw data for {endpoint} saved to {jsonl_file}")
            jsonl_files[endpoint] = jsonl_file

    # Convert each endpoint's cached JSONL into parquet, then optionally upload.
    for endpoint in endpoints:
        jsonl_file = jsonl_files.get(endpoint)
        if not jsonl_file or not os.path.exists(jsonl_file):
            continue

        print(f"Processing {endpoint} from JSONL...")
        output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
        total_rows = jsonl_to_parquet(endpoint, jsonl_file, output_file)
        print(f"✓ {endpoint}: {total_rows:,} rows -> {output_file}")
        created_files.append(output_file)

        if not skip_upload:
            upload_to_hub(output_file, REPO_ID)

    elapsed = time.time() - start_time
    return created_files, elapsed


def recreate_from_jsonl():
    """Rebuild all parquet files from previously cached JSONL (no network)."""
    endpoints = [
        "daily_papers",
        "arxiv_papers",
        "models",
        "spaces",
        "datasets",
        "posts",
    ]

    for endpoint in endpoints:
        jsonl_file = os.path.join(CACHE_DIR, f"{endpoint}_raw.jsonl")
        if not os.path.exists(jsonl_file):
            print(f"✗ {jsonl_file} not found")
            continue

        print(f"Recreating {endpoint} from {jsonl_file}...")
        output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
        total_rows = jsonl_to_parquet(endpoint, jsonl_file, output_file)
        print(f"✓ {endpoint}: {total_rows:,} rows -> {output_file}")


def upload_to_hub(file_path, repo_id):
    """Upload one parquet file to the dataset repo; returns True on success."""
    try:
        api.upload_file(
            path_or_fileobj=file_path,
            path_in_repo=os.path.basename(file_path),
            repo_id=repo_id,
            repo_type="dataset",
        )
        print(f"✓ Uploaded {os.path.basename(file_path)} to {repo_id}")
        return True
    except Exception as e:
        print(f"✗ Failed to upload {os.path.basename(file_path)}: {e}")
        return False


def main(skip_upload=False):
    created_files, elapsed = asyncio.run(create_parquet_files(skip_upload=skip_upload))

    print(f"\nCompleted in {elapsed:.2f} seconds")
    print(f"Created {len(created_files)} parquet files:")

    # Report row counts from the parquet metadata footer, plus file size.
    for file in created_files:
        size = os.path.getsize(file)
        pf = pq.ParquetFile(file)
        rows = pf.metadata.num_rows
        print(f" {os.path.basename(file)}: {rows:,} rows, {size:,} bytes")

    if skip_upload:
        print(f"\nRaw JSONL files saved to {CACHE_DIR}/ for recreation")
        print("Use 'python app.py --recreate' to recreate parquet files from JSONL")


if __name__ == "__main__":
    import sys

    if "--recreate" in sys.argv:
        recreate_from_jsonl()
    else:
        skip_upload = "--skip-upload" in sys.argv
        main(skip_upload=skip_upload)
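
# Usage (the --recreate hint above refers to this file as app.py):
#   python app.py                # fetch, build parquet files, and upload
#   python app.py --skip-upload  # fetch and build parquet files only
#   python app.py --recreate     # rebuild parquet files from cached JSONL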