import os
import subprocess
import json
import logging
import shutil

from github import Github
import tiktoken

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

github_token = os.getenv('GITHUB_TOKEN')
if not github_token:
    raise ValueError("GITHUB_TOKEN environment variable is not set")
g = Github(github_token)

# Tokenizer used to size repo content; cl100k_base matches OpenAI's GPT-4-family models
tokenizer = tiktoken.get_encoding("cl100k_base")
def clone_repo(repo_url, repo_name):
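    """Shallow-clone `repo_url` into /tmp and return the checkout path."""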
    # Sanitize the name so "owner/repo" maps to a single /tmp subdirectory
    tmp_dir = f"/tmp/{repo_name.replace('/', '_')}"
    subprocess.run(["git", "clone", "--depth", "1", repo_url, tmp_dir], check=True)
    return tmp_dir
def get_repo_content(repo_dir):
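    """Concatenate every .py file under `repo_dir`, skipping test/example and
    hidden directories; each file is prefixed with its repo-relative path.
    """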
    content = []
    for root, dirs, files in os.walk(repo_dir):
        # Prune test/example and hidden (e.g. .git) directories in place so
        # os.walk never descends into them; a bare `continue` would still
        # recurse, and matching on the absolute root path would wrongly skip
        # repos whose own name contains "test" or "example".
        dirs[:] = [d for d in dirs
                   if not d.startswith('.')
                   and "test" not in d.lower()
                   and "example" not in d.lower()]
        for file in files:
            if file.endswith('.py'):
                file_path = os.path.join(root, file)
                relative_path = os.path.relpath(file_path, repo_dir)
                with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                    file_content = f.read()
                content.append(f"File: {relative_path}\n\n{file_content}\n\n")
    return "\n".join(content)
def count_tokens(text):
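    """Return the number of cl100k_base tokens in `text`."""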
    return len(tokenizer.encode(text))
def process_repo(repo):
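    """Clone `repo`, gather its Python sources and README, and return a
    dataset record, or None if the repo is unsuitable (no README, content
    at or above 100k tokens, or a processing error).
    """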
    # Use full_name ("owner/repo") so the stored key matches the lookups in main()
    repo_name = repo.full_name
    repo_url = repo.clone_url
    logging.info(f"Processing repository: {repo_name}")

    tmp_dir = None
    try:
        tmp_dir = clone_repo(repo_url, repo_name)
        readme_path = os.path.join(tmp_dir, "README.md")

        if not os.path.exists(readme_path):
            logging.info(f"README.md not found in {repo_name}")
            return None

        repo_content = get_repo_content(tmp_dir)
        if count_tokens(repo_content) >= 100000:
            logging.info(f"Repository {repo_name} content exceeds 100k tokens")
            return None

        with open(readme_path, 'r', encoding='utf-8', errors='ignore') as f:
            readme_content = f.read()

        repo_commit = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=tmp_dir).decode().strip()

        return {
            "repo_name": repo_name,
            "repo_commit": repo_commit,
            "repo_content": repo_content,
            "repo_readme": readme_content
        }
    except Exception as e:
        logging.error(f"Error processing repository {repo_name}: {str(e)}")
        return None
    finally:
        # The clone may not exist if git failed partway, so tolerate a missing directory
        if tmp_dir:
            shutil.rmtree(tmp_dir, ignore_errors=True)
def load_existing_data(filename):
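    """Load previously saved records from a JSONL file, keyed by repo_name.
    Later lines overwrite earlier ones, so the newest entry for a repo wins.
    """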
    existing_data = {}
    if os.path.exists(filename):
        with open(filename, "r") as f:
            for line in f:
                item = json.loads(line)
                existing_data[item['repo_name']] = item
    return existing_data
def save_dataset(filename, dataset, mode='a'):
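    """Write dataset records to `filename` as JSON Lines (one object per line)."""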
    with open(filename, mode) as f:
        for item in dataset:
            json.dump(item, f)
            f.write("\n")
def main():
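    """Search GitHub for popular Python repos (stars > 1000, forks > 100),
    process up to 400 of them, and append new or changed records to the
    JSONL dataset, skipping repos whose HEAD commit is unchanged.
    """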
    # Reuse the module-level client `g`, which has already validated the token
    filename = "generate-readme-eval.jsonl"
    existing_data = load_existing_data(filename)

    new_dataset = []
    updated_count = 0
    skipped_count = 0

    repos = g.search_repositories(query="language:python stars:>1000 forks:>100", sort="stars", order="desc")

    for i, repo in enumerate(repos[:400]):
        if repo.full_name in existing_data:
            existing_item = existing_data[repo.full_name]
            if existing_item['repo_commit'] == repo.get_commits()[0].sha:
                skipped_count += 1
                logging.info(f"Skipped {repo.full_name}: Already processed with same commit")
                continue
            else:
                logging.info(f"Updating {repo.full_name}: Commit changed")
                updated_count += 1

        item = process_repo(repo)
        if item:
            new_dataset.append(item)

        if (i + 1) % 10 == 0:
            logging.info(f"Processed {i + 1} repositories")

    # Append-only writes mean an updated repo appears twice in the file;
    # load_existing_data keeps the last occurrence, so the newest entry wins.
    save_dataset(filename, new_dataset, mode='a')

    logging.info(f"Dataset updated with {len(new_dataset)} new/updated items")
    logging.info(f"Skipped {skipped_count} repositories (no changes)")
    logging.info(f"Updated {updated_count} repositories")
if __name__ == "__main__":
    main()
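
# Example invocation (the script filename here is an assumption; any GitHub
# personal access token with public-repo read access should work):
#
#   GITHUB_TOKEN=ghp_xxx python generate_readme_eval.py
#
# Each run appends new or changed repos to generate-readme-eval.jsonl as
# records with keys: repo_name, repo_commit, repo_content, repo_readme.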