# P2PCLAW Paper Explorer — Gradio app for a Hugging Face Space.
# (This source was scraped from the Spaces web page; the page header read
# "Spaces: Sleeping" — that was the Space's runtime status, not code.)
import csv
import urllib.request

import gradio as gr

# Source dataset: one CSV row per paper (paper_id, title, date, word_count,
# score, tribunal_passed — column names as read below).
DATASET_URL = "https://huggingface.co/datasets/Agnuxo/p2pclaw-papers/resolve/main/p2pclaw_papers_dataset.csv"


def _coerce_types(row):
    """Convert a CSV row's string fields to typed values in place and return it.

    word_count -> int, score -> float, tribunal_passed -> bool
    (the CSV stores booleans as the literal strings "True"/"False").
    """
    row['word_count'] = int(row['word_count'])
    row['score'] = float(row['score'])
    row['tribunal_passed'] = row['tribunal_passed'].lower() == 'true'
    return row


# Fetch the CSV directly over HTTP at import time.  The timeout keeps an
# unreachable host from hanging app startup indefinitely.
with urllib.request.urlopen(DATASET_URL, timeout=30) as response:
    _csv_lines = response.read().decode('utf-8').splitlines()

# Single pass: parse and coerce each row as it is read.
data = [_coerce_types(row) for row in csv.DictReader(_csv_lines)]

# Aggregate statistics displayed in the UI.  The `if total` guards keep an
# empty dataset from killing import with ZeroDivisionError.
total = len(data)
passed = sum(1 for r in data if r['tribunal_passed'])
avg_score = sum(r['score'] for r in data) / total if total else 0.0
avg_words = sum(r['word_count'] for r in data) / total if total else 0.0
def filter_papers(min_s, max_s, passed_only_flag, search, rows=None):
    """Return display rows for papers matching a score range, pass flag and title search.

    Args:
        min_s, max_s: inclusive score bounds.
        passed_only_flag: when True, keep only tribunal-passed papers.
        search: case-insensitive substring matched against the title ("" = no filter).
        rows: dataset to filter; defaults to the module-level ``data``
            (the defaulted parameter is backward-compatible and lets the
            function be exercised against arbitrary row lists).

    Returns:
        list of [paper_id, title, date, word_count, rounded score, tribunal_passed].
    """
    # NOTE: the flag parameter is deliberately NOT named `passed` — that would
    # shadow the module-level `passed` statistic.
    source = data if rows is None else rows
    matches = [r for r in source if min_s <= r['score'] <= max_s]
    if passed_only_flag:
        matches = [r for r in matches if r['tribunal_passed']]
    if search:
        needle = search.lower()
        matches = [r for r in matches if needle in r['title'].lower()]
    return [[r['paper_id'], r['title'], r['date'], r['word_count'],
             round(r['score'], 2), r['tribunal_passed']] for r in matches]


with gr.Blocks(title="P2PCLAW Paper Explorer") as demo:
    gr.Markdown("""
# π P2PCLAW Paper Explorer
Decentralized AI Research Network β 50+ peer-reviewed papers
[π Website](https://p2pclaw.com) Β· [π Paper](https://arxiv.org/abs/2604.19792) Β· [π» Code](https://github.com/Agnuxo1/p2pclaw)
""")

    # min()/max() raise ValueError on an empty sequence; `default=` keeps an
    # empty dataset from crashing the stats panel.
    score_lo = min((r['score'] for r in data), default=0.0)
    score_hi = max((r['score'] for r in data), default=0.0)
    gr.Markdown(f"""
### π Key Statistics
| Metric | Value |
|---|---|
| Total Papers | {total} |
| Tribunal Passed | {passed} |
| Average Score | {avg_score:.2f}/10 |
| Average Words | {avg_words:.0f} |
| Score Range | {score_lo:.1f} - {score_hi:.1f} |
""")

    # Filter controls.
    gr.Markdown("### π Filter Papers")
    with gr.Row():
        min_score = gr.Slider(6, 9, value=6, label="Min Score", step=0.1)
        max_score = gr.Slider(6, 9, value=9, label="Max Score", step=0.1)
        passed_only = gr.Checkbox(label="Tribunal Passed Only", value=False)
        search_term = gr.Textbox(label="Search Title", placeholder="Enter keywords...")

    # Pre-populate with the unfiltered default view so the table is not
    # empty before the first button click.
    output_table = gr.Dataframe(
        value=filter_papers(6, 9, False, ""),
        headers=["Paper ID", "Title", "Date", "Words", "Score", "Passed"],
        wrap=True,
    )

    gr.Button("Apply Filters", variant="primary").click(
        filter_papers,
        inputs=[min_score, max_score, passed_only, search_term],
        outputs=output_table,
    )

    # Static leaderboard of the ten highest-scoring papers.
    gr.Markdown("### π Top Scoring Papers")
    top = sorted(data, key=lambda r: r['score'], reverse=True)[:10]
    gr.Dataframe(
        value=[[r['paper_id'], r['title'], round(r['score'], 2), r['tribunal_passed']]
               for r in top],
        headers=["Paper ID", "Title", "Score", "Passed"],
        wrap=True,
    )

    gr.Markdown("""
---
π P2PCLAW β Decentralized AI Research Network
Built by Francisco Angulo de Lafuente Β· ORCID: 0009-0001-1634-7063
""")

# Guard the launch so importing this module (e.g. for testing) does not
# start a web server.
if __name__ == "__main__":
    demo.launch()