"""
License Detection Benchmark for CodeReality-1T Dataset
Evaluates automated license classification systems
"""

import json
import os
import random
from collections import defaultdict
from typing import Dict, List
|
|
def load_dataset_sample(data_dir: str, sample_size: int = 1000) -> List[Dict]:
    """Load a random sample of repositories from the dataset."""
    print(f"🔍 Loading sample of {sample_size} repositories...")

    repositories = []

    # Shuffle the shard files and scan at most 10 of them.
    files = [f for f in os.listdir(data_dir) if f.endswith('.jsonl')]
    random.shuffle(files)

    for filename in files[:10]:
        file_path = os.path.join(data_dir, filename)
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                for line in f:
                    if len(repositories) >= sample_size:
                        break
                    try:
                        repo_data = json.loads(line)
                        repositories.append(repo_data)
                    except json.JSONDecodeError:
                        # Skip malformed lines; the dataset is deliberately noisy.
                        continue
        except Exception as e:
            print(f"⚠️ Error reading {filename}: {e}")
            continue

        if len(repositories) >= sample_size:
            break

    print(f"✅ Loaded {len(repositories)} repositories")
    return repositories
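
# Assumed JSONL record shape (inferred from the field accesses in
# extract_license_features below; the actual CodeReality-1T schema may
# carry additional fields):
#
#   {"license": "MIT",
#    "files": [{"path": "README.md", "content": "..."}, ...]}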
|
|
def extract_license_features(repo: Dict) -> Dict:
    """Extract features that could indicate license presence."""
    features = {
        'has_license_file': False,
        'has_readme': False,
        'license_keywords_count': 0,
        'copyright_mentions': 0,
        'file_count': 0,
        'detected_license': repo.get('license', 'Unknown')
    }

    files = repo.get('files', [])
    features['file_count'] = len(files)

    license_keywords = ['license', 'mit', 'apache', 'gpl', 'bsd', 'copyright']

    for file_obj in files:
        if isinstance(file_obj, dict):
            file_path = file_obj.get('path', '').lower()
            content = file_obj.get('content', '').lower()

            # Dedicated license file (LICENSE, COPYING, legal notices).
            if any(keyword in file_path for keyword in ['license', 'copying', 'legal']):
                features['has_license_file'] = True

            # README presence, a weak signal for in-README license notes.
            if 'readme' in file_path:
                features['has_readme'] = True

            # Count license-related keywords in file contents.
            for keyword in license_keywords:
                features['license_keywords_count'] += content.count(keyword)

            # Copyright notices are tracked separately.
            features['copyright_mentions'] += content.count('copyright')

    return features
|
|
def simple_license_classifier(features: Dict) -> str:
    """Simple rule-based license classifier for demonstration."""
    # Thresholds below are illustrative, not tuned on real data.
    if features['has_license_file']:
        if features['license_keywords_count'] > 10:
            return 'MIT'
        elif features['copyright_mentions'] > 5:
            return 'Apache-2.0'
        else:
            return 'GPL-3.0'
    elif features['has_readme'] and features['license_keywords_count'] > 3:
        return 'MIT'
    elif features['file_count'] > 50 and features['copyright_mentions'] > 2:
        return 'Apache-2.0'
    else:
        return 'Unknown'
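
# Illustrative call (hypothetical feature values, not drawn from the dataset):
# a repo with a LICENSE file and 12 keyword hits falls into the first branch
# above and is labelled 'MIT'.
#
#   simple_license_classifier({
#       'has_license_file': True, 'has_readme': True,
#       'license_keywords_count': 12, 'copyright_mentions': 1,
#       'file_count': 8, 'detected_license': 'Unknown',
#   })  # -> 'MIT'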
|
|
def evaluate_license_detection(repositories: List[Dict]) -> Dict:
    """Evaluate license detection performance."""
    print("🧮 Evaluating license detection...")

    results = {
        'total_repos': len(repositories),
        'predictions': [],
        'ground_truth': [],
        'accuracy': 0.0,
        'license_distribution': defaultdict(int),
        'prediction_distribution': defaultdict(int)
    }

    for repo in repositories:
        features = extract_license_features(repo)
        predicted_license = simple_license_classifier(features)
        actual_license = features['detected_license']

        results['predictions'].append(predicted_license)
        results['ground_truth'].append(actual_license)
        results['license_distribution'][actual_license] += 1
        results['prediction_distribution'][predicted_license] += 1

    # Accuracy is exact string match against the dataset's license field.
    correct = sum(1 for p, a in zip(results['predictions'], results['ground_truth']) if p == a)
    results['accuracy'] = correct / len(repositories) if repositories else 0.0

    return results
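

# Optional extension (a minimal sketch, not part of the original benchmark):
# per-license recall exposes class skew that a single accuracy number hides.
def per_license_recall(results: Dict) -> Dict[str, float]:
    """Fraction of repos of each ground-truth license predicted correctly."""
    totals = defaultdict(int)
    hits = defaultdict(int)
    for predicted, actual in zip(results['predictions'], results['ground_truth']):
        totals[actual] += 1
        if predicted == actual:
            hits[actual] += 1
    return {license_type: hits[license_type] / count
            for license_type, count in totals.items()}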
|
|
def print_benchmark_results(results: Dict):
    """Print formatted benchmark results."""
    print("=" * 60)
    print("📊 LICENSE DETECTION BENCHMARK RESULTS")
    print("=" * 60)

    print(f"Total repositories evaluated: {results['total_repos']}")
    print(f"Overall accuracy: {results['accuracy']:.3f}")

    print("\n📈 Ground Truth Distribution:")
    top_licenses = sorted(results['license_distribution'].items(),
                          key=lambda x: x[1], reverse=True)[:10]
    for license_type, count in top_licenses:
        percentage = (count / results['total_repos']) * 100
        print(f"  {license_type}: {count} ({percentage:.1f}%)")

    print("\n🎯 Prediction Distribution:")
    for license_type, count in sorted(results['prediction_distribution'].items(),
                                      key=lambda x: x[1], reverse=True):
        percentage = (count / results['total_repos']) * 100
        print(f"  {license_type}: {count} ({percentage:.1f}%)")

    print("\n💡 Insights:")
    print("- CodeReality-1T is deliberately noisy, with a 0% license detection rate")
    print("- This benchmark demonstrates the difficulty of license classification")
    print("- Most repositories lack clear licensing information")
    print("- This makes the dataset well suited to stress-testing license detectors")
|
|
def main():
    """Run license detection benchmark."""
    print("🚀 CodeReality-1T License Detection Benchmark")
    print("=" * 60)

    # Configuration: point data_dir at your local copy of the dataset.
    data_dir = "/mnt/z/CodeReality_Final/unified_dataset"
    sample_size = 500

    if not os.path.exists(data_dir):
        print(f"❌ Dataset directory not found: {data_dir}")
        print("Please update the data_dir path to point to your CodeReality-1T dataset")
        return

    # Load a sample of repositories.
    repositories = load_dataset_sample(data_dir, sample_size)

    if not repositories:
        print("❌ No repositories loaded. Check dataset path.")
        return

    # Run the evaluation.
    results = evaluate_license_detection(repositories)

    # Report results.
    print_benchmark_results(results)

    # Save results, converting defaultdicts to plain dicts for JSON.
    output_file = "license_detection_results.json"
    results_json = {
        'total_repos': results['total_repos'],
        'accuracy': results['accuracy'],
        'license_distribution': dict(results['license_distribution']),
        'prediction_distribution': dict(results['prediction_distribution'])
    }
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(results_json, f, indent=2)

    print(f"\n💾 Results saved to: {output_file}")


if __name__ == "__main__":
    main()
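
# Usage (assuming this file is saved as license_detection_benchmark.py):
#   python license_detection_benchmark.py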