Update org landing page with benchmark results
Browse files- index.html +135 -17
index.html
CHANGED
|
@@ -1,19 +1,137 @@
|
|
| 1 |
<!doctype html>
|
| 2 |
-
<html>
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
</html>
|
|
|
|
<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <!-- "—" restores the dash that was mojibake ("β") in the extracted source -->
  <title>NewsReX — Pre-trained News Recommendation Models</title>
  <meta name="description" content="Pre-trained weights and MIND-small benchmark results for 10 neural news recommendation models trained with the NewsReX framework.">
  <link rel="stylesheet" href="style.css">
</head>
<body>
  <div class="container">
    <header>
      <h1>NewsReX</h1>
      <p class="subtitle">Pre-trained News Recommendation Models</p>
      <div class="badges">
        <a href="https://arxiv.org/abs/2508.21572"><img src="https://img.shields.io/badge/arXiv-2508.21572-b31b1b.svg" alt="arXiv"></a>
        <a href="https://github.com/igor17400/NewsReX"><img src="https://img.shields.io/badge/GitHub-NewsReX-blue.svg" alt="GitHub"></a>
        <a href="https://www.python.org/downloads/release/python-3120/"><img src="https://img.shields.io/badge/python-3.12+-blue.svg" alt="Python 3.12+"></a>
      </div>
    </header>

    <section>
      <p>This organization hosts pre-trained weights for <strong>10 neural news recommendation models</strong> trained on the <a href="https://msnews.github.io/">MIND-small</a> dataset using the <a href="https://github.com/igor17400/NewsReX">NewsReX</a> framework. All models are trained with 3 random seeds (42, 123, 456) and evaluated on the standard MIND test split.</p>
    </section>

    <section>
      <h2>Benchmark Results (MIND-small, mean ± std over 3 seeds)</h2>

      <h3>JAX Models</h3>
      <div class="table-wrapper">
        <table>
          <thead>
            <!-- scope="col" ties each data cell to its column header for screen readers -->
            <tr><th scope="col">Model</th><th scope="col">AUC</th><th scope="col">MRR</th><th scope="col">NDCG@5</th><th scope="col">NDCG@10</th><th scope="col">Weights</th></tr>
          </thead>
          <tbody>
            <tr><td><strong>CROWN</strong></td><td>0.6778±0.0030</td><td>0.3246±0.0018</td><td>0.3619±0.0022</td><td>0.4233±0.0022</td><td><a href="https://huggingface.co/newsrex/CROWN-JAX-MIND-small">Download</a></td></tr>
            <tr><td><strong>DIGAT</strong></td><td>0.6760±0.0021</td><td>0.3245±0.0021</td><td>0.3594±0.0035</td><td>0.4220±0.0027</td><td><a href="https://huggingface.co/newsrex/DIGAT-JAX-MIND-small">Download</a></td></tr>
            <tr><td><strong>CAUM</strong></td><td>0.6734±0.0013</td><td>0.3202±0.0009</td><td>0.3546±0.0009</td><td>0.4185±0.0006</td><td><a href="https://huggingface.co/newsrex/CAUM-JAX-MIND-small">Download</a></td></tr>
            <tr><td><strong>TCCM</strong></td><td>0.6734±0.0055</td><td>0.3208±0.0034</td><td>0.3574±0.0046</td><td>0.4194±0.0043</td><td><a href="https://huggingface.co/newsrex/TCCM-JAX-MIND-small">Download</a></td></tr>
            <tr><td><strong>PP-Rec</strong></td><td>0.6676±0.0040</td><td>0.3182±0.0033</td><td>0.3544±0.0041</td><td>0.4164±0.0036</td><td><a href="https://huggingface.co/newsrex/PPREC-JAX-MIND-small">Download</a></td></tr>
            <tr><td><strong>LSTUR</strong></td><td>0.6672±0.0020</td><td>0.3177±0.0033</td><td>0.3525±0.0037</td><td>0.4156±0.0033</td><td><a href="https://huggingface.co/newsrex/LSTUR-JAX-MIND-small">Download</a></td></tr>
            <tr><td><strong>NAML</strong></td><td>0.6639±0.0014</td><td>0.3130±0.0022</td><td>0.3456±0.0033</td><td>0.4097±0.0025</td><td><a href="https://huggingface.co/newsrex/NAML-JAX-MIND-small">Download</a></td></tr>
            <tr><td><strong>GLORY</strong></td><td>0.6624±0.0030</td><td>0.3152±0.0038</td><td>0.3483±0.0041</td><td>0.4119±0.0040</td><td><a href="https://huggingface.co/newsrex/GLORY-JAX-MIND-small">Download</a></td></tr>
            <tr><td><strong>MINER</strong></td><td>0.6579±0.0024</td><td>0.3117±0.0027</td><td>0.3444±0.0035</td><td>0.4080±0.0025</td><td><a href="https://huggingface.co/newsrex/MINER-JAX-MIND-small">Download</a></td></tr>
            <tr><td><strong>NRMS</strong></td><td>0.6561±0.0006</td><td>0.3075±0.0008</td><td>0.3394±0.0003</td><td>0.4039±0.0007</td><td><a href="https://huggingface.co/newsrex/NRMS-JAX-MIND-small">Download</a></td></tr>
          </tbody>
        </table>
      </div>

      <h3>PyTorch Models</h3>
      <div class="table-wrapper">
        <table>
          <thead>
            <tr><th scope="col">Model</th><th scope="col">AUC</th><th scope="col">MRR</th><th scope="col">NDCG@5</th><th scope="col">NDCG@10</th><th scope="col">Weights</th></tr>
          </thead>
          <tbody>
            <tr><td><strong>CROWN</strong></td><td>0.6705±0.0045</td><td>0.3183±0.0049</td><td>0.3553±0.0056</td><td>0.4173±0.0056</td><td><a href="https://huggingface.co/newsrex/CROWN-PYTORCH-MIND-small">Download</a></td></tr>
            <tr><td><strong>CAUM</strong></td><td>0.6656±0.0053</td><td>0.3176±0.0028</td><td>0.3504±0.0040</td><td>0.4149±0.0035</td><td><a href="https://huggingface.co/newsrex/CAUM-PYTORCH-MIND-small">Download</a></td></tr>
            <tr><td><strong>NAML</strong></td><td>0.6654±0.0015</td><td>0.3105±0.0009</td><td>0.3464±0.0027</td><td>0.4097±0.0018</td><td><a href="https://huggingface.co/newsrex/NAML-PYTORCH-MIND-small">Download</a></td></tr>
            <tr><td><strong>PP-Rec</strong></td><td>0.6631±0.0044</td><td>0.3130±0.0024</td><td>0.3487±0.0041</td><td>0.4111±0.0033</td><td><a href="https://huggingface.co/newsrex/PPREC-PYTORCH-MIND-small">Download</a></td></tr>
            <tr><td><strong>TCCM</strong></td><td>0.6616±0.0019</td><td>0.3088±0.0022</td><td>0.3428±0.0031</td><td>0.4057±0.0024</td><td><a href="https://huggingface.co/newsrex/TCCM-PYTORCH-MIND-small">Download</a></td></tr>
            <tr><td><strong>NRMS</strong></td><td>0.6534±0.0025</td><td>0.3052±0.0021</td><td>0.3367±0.0019</td><td>0.4017±0.0022</td><td><a href="https://huggingface.co/newsrex/NRMS-PYTORCH-MIND-small">Download</a></td></tr>
            <tr><td><strong>LSTUR</strong></td><td>—</td><td>—</td><td>—</td><td>—</td><td><a href="https://huggingface.co/newsrex/LSTUR-PYTORCH-MIND-small">Download</a></td></tr>
            <tr><td><strong>DIGAT</strong></td><td>—</td><td>—</td><td>—</td><td>—</td><td><a href="https://huggingface.co/newsrex/DIGAT-PYTORCH-MIND-small">Download</a></td></tr>
            <tr><td><strong>GLORY</strong></td><td>—</td><td>—</td><td>—</td><td>—</td><td><a href="https://huggingface.co/newsrex/GLORY-PYTORCH-MIND-small">Download</a></td></tr>
          </tbody>
        </table>
      </div>
    </section>

    <section>
      <h2>Supported Models</h2>
      <div class="table-wrapper">
        <table>
          <thead>
            <tr><th scope="col">Model</th><th scope="col">Paper</th><th scope="col">Venue</th></tr>
          </thead>
          <tbody>
            <tr><td>NRMS</td><td>Neural News Recommendation with Multi-Head Self-Attention</td><td>EMNLP 2019</td></tr>
            <tr><td>NAML</td><td>Neural News Recommendation with Attentive Multi-View Learning</td><td>EMNLP 2019</td></tr>
            <tr><td>LSTUR</td><td>Neural News Recommendation with Long- and Short-term User Representations</td><td>ACL 2019</td></tr>
            <tr><td>CROWN</td><td>Intent Disentanglement and Feature Self-Supervision for News Recommendation</td><td>WWW 2025</td></tr>
            <tr><td>PP-Rec</td><td>News Recommendation with Personalized User Interest and Popularity Deconfounding</td><td>ACL 2021</td></tr>
            <tr><td>DIGAT</td><td>Dual Interactive Graph Attention Networks for News Recommendation</td><td>EMNLP 2022</td></tr>
            <tr><td>GLORY</td><td>Global-Local News Recommendation via Multi-Channel Graph Modeling</td><td>NAACL 2024</td></tr>
            <tr><td>MINER</td><td>Multi-Interest News Extraction and Recommendation</td><td>EMNLP 2022</td></tr>
            <tr><td>CAUM</td><td>Candidate-Aware User Modeling for News Recommendation</td><td>RecSys 2023</td></tr>
            <tr><td>TCCM</td><td>Topic-Centric Conversational Collaborative Model for News Recommendation</td><td>CIKM 2022</td></tr>
          </tbody>
        </table>
      </div>
    </section>

    <section>
      <h2>Quick Start</h2>
      <!-- "&" must be escaped as &amp; inside HTML content -->
      <pre><code>git clone https://github.com/igor17400/NewsReX.git
cd NewsReX &amp;&amp; uv sync --extra jax

# Evaluate a pre-trained model
uv run python src/train.py experiment=mind/nrms framework=jax \
    weights=hf://newsrex/NRMS-JAX-MIND-small/model.safetensors

# Train from scratch (3 seeds)
uv run python src/train.py experiment=mind/nrms framework=jax \
    multi_seed.enabled=true</code></pre>
    </section>

    <section>
      <h2>Repository Structure</h2>
      <!-- Tree prefixes restored from mojibake ("βββ" was mis-encoded box drawing); "<" escaped as &lt; -->
      <pre><code>newsrex/{MODEL}-{FRAMEWORK}-MIND-small/
├── model.safetensors   &lt;- best seed (default download)
├── test_results.json
├── training_run_summary.json
├── seed_42/model.safetensors
├── seed_123/model.safetensors
├── seed_456/model.safetensors
└── README.md</code></pre>
    </section>

    <section>
      <h2>Citation</h2>
      <pre><code>@misc{azevedo2025newsrex,
  title={NewsReX: A More Efficient Approach to News Recommendation with Keras 3 and JAX},
  author={Igor L. R. Azevedo and Toyotaro Suzumura and Yuichiro Yasui},
  year={2025},
  eprint={2508.21572},
  archivePrefix={arXiv},
  primaryClass={cs.IR},
  url={https://arxiv.org/abs/2508.21572},
}</code></pre>
    </section>

    <footer>
      <p><strong>Authors:</strong> Igor L.R. Azevedo (U. Tokyo) · Toyotaro Suzumura (U. Tokyo) · Yuichiro Yasui (Nikkei Inc.)</p>
    </footer>
  </div>
</body>
</html>
|