Upload ScaleBench dataset

- .gitattributes +0 -1
- README.md +320 -3
- __init__.py +0 -0
- data_constrained_scaling_law/laws.py +941 -0
- data_constrained_scaling_law/test-00000-of-00001.parquet +3 -0
- data_constrained_scaling_law/train-00000-of-00001.parquet +3 -0
- domain_mixture_scaling_law/laws.py +654 -0
- domain_mixture_scaling_law/test-00000-of-00001.parquet +3 -0
- domain_mixture_scaling_law/train-00000-of-00001.parquet +3 -0
- farseer_scaling_law/__init__.py +1 -0
- farseer_scaling_law/laws.py +99 -0
- farseer_scaling_law/test-00000-of-00001.parquet +3 -0
- farseer_scaling_law/train-00000-of-00001.parquet +3 -0
- lr_bsz_scaling_law/laws.py +1188 -0
- lr_bsz_scaling_law/test-00000-of-00001.parquet +3 -0
- lr_bsz_scaling_law/train-00000-of-00001.parquet +3 -0
- moe_scaling_law/laws.py +622 -0
- moe_scaling_law/test-00000-of-00001.parquet +3 -0
- moe_scaling_law/train-00000-of-00001.parquet +3 -0
- parallel_scaling_law/laws.py +393 -0
- parallel_scaling_law/test-00000-of-00001.parquet +3 -0
- parallel_scaling_law/train-00000-of-00001.parquet +3 -0
- registry.py +176 -0
- sparsity_scaling_law/__init__.py +0 -0
- sparsity_scaling_law/laws.py +220 -0
- sparsity_scaling_law/test-00000-of-00001.parquet +3 -0
- sparsity_scaling_law/train-00000-of-00001.parquet +3 -0
- utils.py +71 -0
- vocab_scaling_law/laws.py +630 -0
- vocab_scaling_law/test-00000-of-00001.parquet +3 -0
- vocab_scaling_law/train-00000-of-00001.parquet +3 -0
.gitattributes CHANGED

```diff
@@ -1,6 +1,5 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
-*.avro filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
 *.ckpt filter=lfs diff=lfs merge=lfs -text
```
README.md CHANGED

Removed the placeholder front matter:

```diff
@@ -1,3 +1,320 @@
----
-license:
-
```

New contents of `README.md`:
---
license: cc-by-4.0
language:
- en
pretty_name: Budget-Efficient Scaling Law Fitting Benchmark
task_categories:
- tabular-regression
tags:
- scaling-laws
- active-learning
- experimental-design
- benchmark
- language-modeling
size_categories:
- 1K<n<10K
dataset_info:
- config_name: data_constrained_scaling_law
  features:
  - name: group
    dtype: string
  - name: unique_tokens
    dtype: float64
  - name: params
    dtype: float64
  - name: tokens
    dtype: float64
  - name: loss
    dtype: float64
  splits:
  - name: train
    num_examples: 161
  - name: test
    num_examples: 21
- config_name: domain_mixture_scaling_law
  features:
  - name: group
    dtype: string
  - name: proportion_domain_1
    dtype: float64
  - name: proportion_domain_2
    dtype: float64
  - name: proportion_domain_3
    dtype: float64
  - name: proportion_domain_4
    dtype: float64
  - name: proportion_domain_5
    dtype: float64
  - name: loss_domain_1
    dtype: float64
  - name: loss_domain_2
    dtype: float64
  - name: loss_domain_3
    dtype: float64
  - name: loss_domain_4
    dtype: float64
  - name: loss_domain_5
    dtype: float64
  splits:
  - name: train
    num_examples: 80
  - name: test
    num_examples: 24
- config_name: farseer_scaling_law
  features:
  - name: group
    dtype: string
  - name: N
    dtype: float64
  - name: D
    dtype: float64
  - name: loss
    dtype: float64
  splits:
  - name: train
    num_examples: 404
  - name: test
    num_examples: 7
- config_name: lr_bsz_scaling_law
  features:
  - name: group
    dtype: string
  - name: lr
    dtype: float64
  - name: bsz
    dtype: float64
  - name: data_size
    dtype: float64
  - name: non_embedding_param_size
    dtype: float64
  - name: lm_loss
    dtype: float64
  splits:
  - name: train
    num_examples: 2702
  - name: test
    num_examples: 117
- config_name: moe_scaling_law
  features:
  - name: group
    dtype: string
  - name: num_experts
    dtype: float64
  - name: dense_parameter_count
    dtype: float64
  - name: loss_validation
    dtype: float64
  splits:
  - name: train
    num_examples: 193
  - name: test
    num_examples: 28
- config_name: parallel_scaling_law
  features:
  - name: num_params
    dtype: int64
  - name: parallel_size
    dtype: int64
  - name: group
    dtype: string
  - name: loss
    dtype: float64
  splits:
  - name: train
    num_examples: 36
  - name: test
    num_examples: 12
- config_name: sparsity_scaling_law
  features:
  - name: group
    dtype: string
  - name: P
    dtype: float64
  - name: N_active
    dtype: float64
  - name: N_dense
    dtype: float64
  - name: D1
    dtype: float64
  - name: D2
    dtype: float64
  - name: loss
    dtype: float64
  splits:
  - name: train
    num_examples: 70
  - name: test
    num_examples: 18
- config_name: vocab_scaling_law
  features:
  - name: group
    dtype: string
  - name: non_vocab_parameters
    dtype: float64
  - name: vocab_size
    dtype: float64
  - name: num_characters
    dtype: float64
  - name: unigram_normalized_loss
    dtype: float64
  splits:
  - name: train
    num_examples: 1080
  - name: test
    num_examples: 120
configs:
- config_name: data_constrained_scaling_law
  data_files:
  - split: train
    path: data_constrained_scaling_law/train-*
  - split: test
    path: data_constrained_scaling_law/test-*
- config_name: domain_mixture_scaling_law
  data_files:
  - split: train
    path: domain_mixture_scaling_law/train-*
  - split: test
    path: domain_mixture_scaling_law/test-*
- config_name: farseer_scaling_law
  data_files:
  - split: train
    path: farseer_scaling_law/train-*
  - split: test
    path: farseer_scaling_law/test-*
- config_name: lr_bsz_scaling_law
  data_files:
  - split: train
    path: lr_bsz_scaling_law/train-*
  - split: test
    path: lr_bsz_scaling_law/test-*
- config_name: moe_scaling_law
  data_files:
  - split: train
    path: moe_scaling_law/train-*
  - split: test
    path: moe_scaling_law/test-*
- config_name: parallel_scaling_law
  data_files:
  - split: train
    path: parallel_scaling_law/train-*
  - split: test
    path: parallel_scaling_law/test-*
- config_name: sparsity_scaling_law
  data_files:
  - split: train
    path: sparsity_scaling_law/train-*
  - split: test
    path: sparsity_scaling_law/test-*
- config_name: vocab_scaling_law
  data_files:
  - split: train
    path: vocab_scaling_law/train-*
  - split: test
    path: vocab_scaling_law/test-*
---

# Budget-Efficient Scaling Law Fitting Benchmark

This repository contains the scaling-law benchmark dataset used in
[Spend Less, Fit Better: Budget-Efficient Scaling Law Fitting via Active Experiment Selection](https://arxiv.org/abs/2604.22753).

The benchmark is designed for budget-aware sequential experimental design in scaling-law fitting. Each configuration provides a finite pool of candidate experiments, a held-out high-cost target region, task-specific covariates, observed outcomes, and companion scaling-law definitions in `laws.py`.

## Dataset Summary

The dataset contains 8 tabular regression tasks and 65 scaling-law instances. The tasks cover language-model scaling settings including pre-training hyperparameter tuning, data allocation, vocabulary design, domain mixture optimization, mixture-of-experts design, sparsity, parallel/inference-time scaling, and Farseer-style dense pre-training scaling.

Each task is stored as a separate Hugging Face configuration with `train` and `test` splits:

| Config | Train | Test | Feature columns | Target column(s) | Law instances |
|---|---:|---:|---|---|---:|
| `data_constrained_scaling_law` | 161 | 21 | `unique_tokens`, `params`, `tokens` | `loss` | 10 |
| `domain_mixture_scaling_law` | 80 | 24 | `proportion_domain_1` ... `proportion_domain_5` | `loss_domain_1` ... `loss_domain_5` | 10 |
| `farseer_scaling_law` | 404 | 7 | `N`, `D` | `loss` | 1 |
| `lr_bsz_scaling_law` | 2702 | 117 | `lr`, `bsz`, `data_size`, `non_embedding_param_size` | `lm_loss` | 10 |
| `moe_scaling_law` | 193 | 28 | `num_experts`, `dense_parameter_count` | `loss_validation` | 10 |
| `parallel_scaling_law` | 36 | 12 | `num_params`, `parallel_size` | `loss` | 10 |
| `sparsity_scaling_law` | 70 | 18 | `P`, `N_active` | `loss` | 4 |
| `vocab_scaling_law` | 1080 | 120 | `non_vocab_parameters`, `vocab_size`, `num_characters` | `unigram_normalized_loss` | 10 |

The `group` column identifies a task-specific subproblem or grouping. For example, domain-mixture rows are grouped by model scale, and parallel-scaling rows are grouped by evaluation corpus.

## Loading

```python
from datasets import load_dataset

ds = load_dataset("sijieli/scalebench", "lr_bsz_scaling_law")
print(ds)
print(ds["train"][0])
```

To load a local checkout before uploading:

```python
from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={
        "train": "lr_bsz_scaling_law/train-*.parquet",
        "test": "lr_bsz_scaling_law/test-*.parquet",
    },
)
```

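Each split also converts cleanly to a DataFrame for tabular inspection. A minimal sketch (any of the eight config names above works; `parallel_scaling_law` is used here purely as an example):

```python
from datasets import load_dataset

ds = load_dataset("sijieli/scalebench", "parallel_scaling_law")
train_df = ds["train"].to_pandas()

# Inspect the task-specific grouping described above
print(train_df.groupby("group").size())
```
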
## Intended Use

This benchmark is intended for evaluating experiment-selection and active experimental-design methods for scaling-law fitting under budget constraints. A typical episode treats the `train` split as the candidate pool of runnable experiments and the `test` split as the target region for extrapolation evaluation.

The benchmark can be used to compare methods that (a minimal episode skeleton follows the list):

- choose experiments sequentially under a cost budget;
- fit nonlinear scaling laws from sparse observations;
- extrapolate to held-out high-cost regions;
- optimize target-region prediction quality rather than in-sample fit.

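As a sketch of that protocol, the loop below is illustrative only: `select_fn` and `fit_fn` are hypothetical placeholders for a selection strategy and a law-fitting routine, not APIs shipped with this dataset.

```python
import numpy as np

def run_episode(pool_X, pool_y, pool_cost, budget, select_fn, fit_fn):
    """Sketch of one benchmark episode under a cost budget.

    select_fn(pool_X, chosen) -> next candidate index (or None to stop);
    fit_fn(X, y) -> fitted scaling-law parameters. Both are placeholders.
    """
    chosen, spent = [], 0.0
    while True:
        idx = select_fn(pool_X, chosen)  # propose the next candidate index
        if idx is None or spent + pool_cost[idx] > budget:
            break  # nothing left to propose, or the budget would be exceeded
        chosen.append(idx)
        spent += float(pool_cost[idx])
    sel = np.asarray(chosen, dtype=int)
    theta = fit_fn(pool_X[sel], pool_y[sel])  # fit on the observed runs only
    return theta, sel, spent
```
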
## Cost Proxies

The paper uses task-specific cost proxies to model heterogeneous experiment costs. The implementation in `registry.py` defines the default proxies:

| Config | Cost proxy |
|---|---|
| `data_constrained_scaling_law` | `6 * params * tokens` |
| `domain_mixture_scaling_law` | `1` |
| `farseer_scaling_law` | `6 * N * D` |
| `lr_bsz_scaling_law` | `6 * non_embedding_param_size * data_size` |
| `moe_scaling_law` | `dense_parameter_count * num_experts` |
| `parallel_scaling_law` | `num_params` |
| `sparsity_scaling_law` | `6 * N_dense * D1 + 6 * N_active * D2` |
| `vocab_scaling_law` | `non_vocab_parameters * num_characters` |

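A per-row cost column follows directly from the table. A sketch for `lr_bsz_scaling_law` (column names come from the feature table above; this recomputes the proxy rather than calling `registry.py`):

```python
from datasets import load_dataset

ds = load_dataset("sijieli/scalebench", "lr_bsz_scaling_law")
df = ds["train"].to_pandas()

# FLOPs-style proxy from the table above: 6 * non_embedding_param_size * data_size
df["cost"] = 6.0 * df["non_embedding_param_size"] * df["data_size"]
print(df[["non_embedding_param_size", "data_size", "cost"]].head())
```
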
## Scaling-Law Definitions

Each task directory includes a `laws.py` file containing the parametric scaling-law families used in the benchmark. The functions are named `sl_1`, `sl_2`, etc., and each file exposes:

- `LAW_REGISTRY`: mapping from law ID to callable;
- `PARAM_COUNTS`: number of free parameters for each law;
- parameter bounds used by the fitting code.

These files are included to make the dataset self-contained for reproducing the benchmark protocol.

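Because each law returns a prediction vector together with an analytic Jacobian, a bound-constrained least-squares fit is straightforward. The helper below is illustrative only, assuming a callable from `LAW_REGISTRY` and a bounds list like the `PARAM_BOUNDS` entries in `laws.py`; it is not part of the repository.

```python
import numpy as np
from scipy.optimize import least_squares

def fit_law(law, bounds, X, y, theta0):
    """Fit one scaling law by bound-constrained nonlinear least squares.

    `law` is a callable from LAW_REGISTRY returning (pred, jac); `bounds`
    is its list of (low, high) parameter bounds. Illustrative helper only.
    """
    lo = np.array([b[0] for b in bounds])
    hi = np.array([b[1] for b in bounds])

    def residuals(theta):
        pred, _jac = law(theta, X, backend="numpy")
        return np.asarray(pred) - y

    return least_squares(residuals, theta0, bounds=(lo, hi))
```
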
## Citation

If you use this benchmark, please cite:

```bibtex
@misc{li2026spendlessfitbetter,
      title={Spend Less, Fit Better: Budget-Efficient Scaling Law Fitting via Active Experiment Selection},
      author={Sijie Li and Shanda Li and Haowei Lin and Weiwei Sun and Ameet Talwalkar and Yiming Yang},
      year={2026},
      eprint={2604.22753},
      archivePrefix={arXiv},
      primaryClass={cs.LG},
      url={https://arxiv.org/abs/2604.22753}
}
```

## License

This dataset card follows the license metadata declared for this repository. Users should also respect the licenses and terms of the original data sources referenced by the paper.

__init__.py ADDED (empty file)
data_constrained_scaling_law/laws.py ADDED

@@ -0,0 +1,941 @@

```python
from typing import Literal

import benchmark.dataset.utils as utils

_EPS = 1e-12
_M_REF = 1.0
_T_REF = 1.0
_U_REF = 1.0

# Scaling law 1:
# A / N^alpha + B / D^beta + E * (U^gamma * N^delta)
# theta: [A, alpha, B, beta, E, gamma, delta]
def sl_1(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    # X: (M, 3)
    U, N, D = X[:, 0], X[:, 1], X[:, 2]

    # theta: (B, 7)
    A = theta[:, 0]
    alpha = theta[:, 1]
    Bcoef = theta[:, 2]
    beta = theta[:, 3]
    Ecoef = theta[:, 4]
    gamma = theta[:, 5]
    delta = theta[:, 6]

    N_b = N[None, :]  # (1, M)
    D_b = D[None, :]
    U_b = U[None, :]

    log_N = xp.log(ops.clamp_min(N_b, _EPS))
    log_D = xp.log(ops.clamp_min(D_b, _EPS))
    log_U = xp.log(ops.clamp_min(U_b, _EPS))

    N_neg_alpha = N_b ** (-alpha[:, None])  # (B, M)
    D_neg_beta = D_b ** (-beta[:, None])    # (B, M)
    U_gamma = U_b ** gamma[:, None]         # (B, M)
    N_delta = N_b ** delta[:, None]         # (B, M)

    term1 = A[:, None] * N_neg_alpha            # (B, M)
    term2 = Bcoef[:, None] * D_neg_beta         # (B, M)
    term3 = Ecoef[:, None] * U_gamma * N_delta  # (B, M)

    pred = term1 + term2 + term3

    # Jacobian: (B, M, 7)
    d_A = N_neg_alpha         # ∂/∂A = N^(-alpha)
    d_alpha = -term1 * log_N  # ∂/∂alpha = -A*N^(-alpha)*log(N)
    d_B = D_neg_beta          # ∂/∂B = D^(-beta)
    d_beta = -term2 * log_D   # ∂/∂beta = -B*D^(-beta)*log(D)
    d_E = U_gamma * N_delta   # ∂/∂E = U^gamma * N^delta
    d_gamma = term3 * log_U   # ∂/∂gamma = term3 * log(U)
    d_delta = term3 * log_N   # ∂/∂delta = term3 * log(N)

    jac = ops.stack([d_A, d_alpha, d_B, d_beta, d_E, d_gamma, d_delta], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac
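
# --- Illustrative usage (not part of the original file) ---
# Evaluating sl_1 on one candidate with the numpy backend; the theta values
# below are arbitrary placeholders, and the X columns follow the (U, N, D)
# layout documented above.
#
#   import numpy as np
#   theta = np.array([5e5, 0.82, 1e5, 0.56, 12.0, -0.04, -0.03])
#   X = np.array([[1e9, 2.5e8, 1e10]])  # (U, N, D)
#   pred, jac = sl_1(theta, X, backend="numpy")
#   # pred has shape (1,), jac has shape (1, 7)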

# Scaling law 2:
# a + b * U^p + c * N^q + d * D^r
# theta: [a, b, c, d, p, q, r]
def sl_2(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    # X: (M, 3)
    U, N, D = X[:, 0], X[:, 1], X[:, 2]

    # theta: (B, 7)
    a0 = theta[:, 0]
    bcoef = theta[:, 1]
    ccoef = theta[:, 2]
    dcoef = theta[:, 3]
    p = theta[:, 4]
    q = theta[:, 5]
    r = theta[:, 6]

    U_b = U[None, :]
    N_b = N[None, :]
    D_b = D[None, :]

    log_U = xp.log(ops.clamp_min(U_b, _EPS))
    log_N = xp.log(ops.clamp_min(N_b, _EPS))
    log_D = xp.log(ops.clamp_min(D_b, _EPS))

    U_p = U_b ** p[:, None]  # (B, M)
    N_q = N_b ** q[:, None]  # (B, M)
    D_r = D_b ** r[:, None]  # (B, M)

    term2 = bcoef[:, None] * U_p
    term3 = ccoef[:, None] * N_q
    term4 = dcoef[:, None] * D_r

    pred = a0[:, None] + term2 + term3 + term4

    # Jacobian: (B, M, 7)
    ones = pred * 0.0 + 1.0
    d_a = ones           # ∂/∂a = 1
    d_b = U_p            # ∂/∂b = U^p
    d_c = N_q            # ∂/∂c = N^q
    d_d = D_r            # ∂/∂d = D^r
    d_p = term2 * log_U  # ∂/∂p = b*U^p*log(U)
    d_q = term3 * log_N  # ∂/∂q = c*N^q*log(N)
    d_r = term4 * log_D  # ∂/∂r = d*D^r*log(D)

    jac = ops.stack([d_a, d_b, d_c, d_d, d_p, d_q, d_r], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac

# Scaling law 3 (data-constrained style):
# loss = A / eff_N^alpha + B / eff_D^alpha + C
# where
#   U_D = U
#   R_D = D / U_D - 1
#   U_N = min(rho * U_D, N)
#   R_N = max(N / U_N - 1, 0)
#   eff_N = U_N + tau_N * U_N * (1 - exp(-R_N / tau_N))
#   eff_D = U_D + tau_D * U_D * (1 - exp(-R_D / tau_D))
#
# theta: [A, tau_N, B, tau_D, alpha, C, rho]
def sl_3(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    # X: (M, 3)
    U, N, D = X[:, 0], X[:, 1], X[:, 2]

    # theta: (B, 7)
    A = theta[:, 0]
    tau_N = theta[:, 1]
    Bcoef = theta[:, 2]
    tau_D = theta[:, 3]
    alpha = theta[:, 4]
    C = theta[:, 5]
    rho = theta[:, 6]

    U_b = U[None, :]  # (1, M)
    N_b = N[None, :]
    D_b = D[None, :]

    # avoid divide-by-zero
    U_D = ops.clamp_min(U_b, _EPS)

    R_D = D_b / U_D - 1.0  # (1, M) or (B, M)

    rho_U_D = rho[:, None] * U_D         # (B, M)
    U_N_raw = ops.minimum(rho_U_D, N_b)  # (B, M)
    U_N = ops.clamp_min(U_N_raw, _EPS)

    R_N_raw = N_b / U_N - 1.0
    R_N = ops.clamp_min(R_N_raw, 0.0)  # (B, M)

    tau_N_b = tau_N[:, None]  # (B, 1)
    tau_D_b = tau_D[:, None]

    exp_RN = ops.exp(-R_N / tau_N_b)  # (B, M)
    exp_RD = ops.exp(-R_D / tau_D_b)  # (B, M)

    eff_N_raw = U_N + tau_N_b * U_N * (1.0 - exp_RN)  # (B, M)
    eff_D_raw = U_D + tau_D_b * U_D * (1.0 - exp_RD)  # (B, M)

    # Masks for clamp: derivative is 0 where clamp is active
    mask_eff_N = (eff_N_raw > _EPS) * 1.0  # (B, M)
    mask_eff_D = (eff_D_raw > _EPS) * 1.0  # (B, M)

    eff_N = ops.clamp_min(eff_N_raw, _EPS)
    eff_D = ops.clamp_min(eff_D_raw, _EPS)

    log_eff_N = xp.log(ops.clamp_min(eff_N, _EPS))
    log_eff_D = xp.log(ops.clamp_min(eff_D, _EPS))

    eff_N_neg_alpha = eff_N ** (-alpha[:, None])  # (B, M)
    eff_D_neg_alpha = eff_D ** (-alpha[:, None])  # (B, M)

    termN = A[:, None] * eff_N_neg_alpha      # (B, M)
    termD = Bcoef[:, None] * eff_D_neg_alpha  # (B, M)

    pred = termN + termD + C[:, None]

    # --- Jacobian ---
    # ∂pred/∂A = eff_N^(-alpha)
    d_A = eff_N_neg_alpha

    # ∂pred/∂B = eff_D^(-alpha)
    d_B = eff_D_neg_alpha

    # ∂pred/∂alpha = -termN * log(eff_N) - termD * log(eff_D)
    d_alpha = -termN * log_eff_N - termD * log_eff_D

    # ∂pred/∂C = 1
    ones = pred * 0.0 + 1.0
    d_C = ones

    # For tau_N: ∂pred/∂tau_N = ∂pred/∂eff_N * ∂eff_N/∂tau_N
    # ∂pred/∂eff_N = -alpha * A * eff_N^(-alpha-1) = -alpha * termN / eff_N
    dpred_deffN = -alpha[:, None] * termN / eff_N  # (B, M)

    # ∂eff_N/∂tau_N = U_N * (1 - exp(-R_N/tau_N) - (R_N/tau_N)*exp(-R_N/tau_N))
    #               = U_N * (1 - exp_RN - (R_N/tau_N)*exp_RN)
    #               = U_N * (1 - exp_RN*(1 + R_N/tau_N))
    deffN_dtauN = U_N * (1.0 - exp_RN * (1.0 + R_N / tau_N_b))  # (B, M)
    d_tau_N = dpred_deffN * deffN_dtauN * mask_eff_N

    # For tau_D: ∂pred/∂tau_D = ∂pred/∂eff_D * ∂eff_D/∂tau_D
    dpred_deffD = -alpha[:, None] * termD / eff_D  # (B, M)
    deffD_dtauD = U_D * (1.0 - exp_RD * (1.0 + R_D / tau_D_b))  # (B, M)
    d_tau_D = dpred_deffD * deffD_dtauD * mask_eff_D

    # For rho: ∂pred/∂rho = ∂pred/∂eff_N * ∂eff_N/∂U_N * ∂U_N/∂rho
    #                     + ∂pred/∂eff_N * ∂eff_N/∂R_N * ∂R_N/∂U_N * ∂U_N/∂rho
    #
    # U_N = min(rho*U_D, N). ∂U_N/∂rho = U_D when rho*U_D < N, else 0.
    # mask: 1 where rho*U_D < N (i.e., the min selects rho*U_D)
    mask_rho = (rho_U_D < N_b) * 1.0  # (B, M), 1 or 0

    # ∂U_N/∂rho = U_D * mask_rho
    dUN_drho = U_D * mask_rho  # (B, M)

    # ∂eff_N/∂U_N via chain rule (U_N appears in eff_N, R_N):
    #   eff_N = U_N * (1 + tau_N * (1 - exp(-R_N/tau_N)))
    #   R_N = max(N/U_N - 1, 0)
    # ∂R_N/∂U_N = -N/U_N^2 when R_N > 0, else 0
    mask_RN = (R_N_raw > 0.0) * 1.0        # (B, M)
    dRN_dUN = -N_b / (U_N ** 2) * mask_RN  # (B, M)

    # ∂eff_N/∂U_N (direct, holding R_N constant):
    #   = 1 + tau_N*(1 - exp_RN)
    deffN_dUN_direct = 1.0 + tau_N_b * (1.0 - exp_RN)  # (B, M)

    # ∂eff_N/∂R_N = tau_N * U_N * (R_N/tau_N derivative of (1-exp(-R_N/tau_N)))
    #             = tau_N * U_N * (1/tau_N)*exp(-R_N/tau_N) = U_N * exp_RN
    deffN_dRN = U_N * exp_RN  # (B, M)

    # total ∂eff_N/∂U_N = deffN_dUN_direct + deffN_dRN * dRN_dUN
    deffN_dUN_total = deffN_dUN_direct + deffN_dRN * dRN_dUN  # (B, M)

    d_rho = dpred_deffN * deffN_dUN_total * dUN_drho * mask_eff_N

    # order: [A, tau_N, B, tau_D, alpha, C, rho]
    jac = ops.stack([d_A, d_tau_N, d_B, d_tau_D, d_alpha, d_C, d_rho], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# Scaling law 4:
# L0 + A * M_n^(-a) + B * T_eff_n^(-b)
# where
#   M_n = max(M / _M_REF, _EPS)
#   T_n = max(T / _T_REF, _EPS)
#   U_n = max(U / _U_REF, _EPS)
#   q = T_n / max(s * U_n * M_n^d, _EPS)
#   T_eff_n = T_n / (1 + q)
#
# theta: [L0, A, a, B, b, s, d]
def sl_4(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    # X: (M, 3)
    U, M, T = X[:, 0], X[:, 1], X[:, 2]

    # theta: (B, 7)
    L0 = theta[:, 0]
    A = theta[:, 1]
    a = theta[:, 2]
    Bcoef = theta[:, 3]
    b = theta[:, 4]
    s = theta[:, 5]
    d = theta[:, 6]

    U_b = U[None, :]
    M_b = M[None, :]
    T_b = T[None, :]

    M_n = ops.clamp_min(M_b / _M_REF, _EPS)
    T_n = ops.clamp_min(T_b / _T_REF, _EPS)
    U_n = ops.clamp_min(U_b / _U_REF, _EPS)

    log_M_n = xp.log(ops.clamp_min(M_n, _EPS))

    scale = s[:, None] * U_n * (M_n ** d[:, None])  # (B, M)
    scale = ops.clamp_min(scale, _EPS)

    q_val = T_n / scale    # (B, M)
    denom = 1.0 + q_val    # (B, M)
    T_eff_n = T_n / denom  # (B, M)
    T_eff_n = ops.clamp_min(T_eff_n, _EPS)

    log_T_eff_n = xp.log(ops.clamp_min(T_eff_n, _EPS))

    Mn_neg_a = M_n ** (-a[:, None])        # (B, M)
    Teff_neg_b = T_eff_n ** (-b[:, None])  # (B, M)

    termM = A[:, None] * Mn_neg_a        # (B, M)
    termT = Bcoef[:, None] * Teff_neg_b  # (B, M)

    pred = L0[:, None] + termM + termT

    # --- Jacobian ---
    ones = pred * 0.0 + 1.0

    # ∂/∂L0 = 1
    d_L0 = ones

    # ∂/∂A = M_n^(-a)
    d_A = Mn_neg_a

    # ∂/∂a = -termM * log(M_n)
    d_a = -termM * log_M_n

    # ∂/∂B = T_eff_n^(-b)
    d_B = Teff_neg_b

    # ∂/∂b = -termT * log(T_eff_n)
    d_b = -termT * log_T_eff_n

    # For s, d: need ∂pred/∂T_eff_n * ∂T_eff_n/∂(s or d)
    # ∂pred/∂T_eff_n = -b * B * T_eff_n^(-b-1) = -b * termT / T_eff_n
    dpred_dTeff = -b[:, None] * termT / T_eff_n  # (B, M)

    # T_eff_n = T_n / (1 + T_n/scale) = T_n * scale / (scale + T_n)
    # ∂T_eff_n/∂scale = T_n * (scale + T_n - scale) / (scale + T_n)^2
    #                 = T_n^2 / (scale + T_n)^2
    scale_plus_Tn = scale + T_n
    dTeff_dscale = T_n ** 2 / (scale_plus_Tn ** 2)  # (B, M)

    # scale = s * U_n * M_n^d
    # ∂scale/∂s = U_n * M_n^d = scale / s[:, None]
    dscale_ds = scale / s[:, None]  # (B, M)

    # ∂scale/∂d = s * U_n * M_n^d * log(M_n) = scale * log(M_n)
    dscale_dd = scale * log_M_n  # (B, M)

    d_s = dpred_dTeff * dTeff_dscale * dscale_ds
    d_d = dpred_dTeff * dTeff_dscale * dscale_dd

    # order: [L0, A, a, B, b, s, d]
    jac = ops.stack([d_L0, d_A, d_a, d_B, d_b, d_s, d_d], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac

# Scaling law 5:
# L = A / N^alpha + B / D_eff^beta + E
# where
#   D_eff = U^gamma * D^(1 - gamma)
#
# theta: [A, alpha, B, beta, E, gamma]
def sl_5(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    # X: (M, 3)
    U, N, D = X[:, 0], X[:, 1], X[:, 2]

    # theta: (B, 6)
    A = theta[:, 0]
    alpha = theta[:, 1]
    Bcoef = theta[:, 2]
    beta = theta[:, 3]
    E = theta[:, 4]
    gamma = theta[:, 5]

    U_b = ops.clamp_min(U[None, :], _EPS)
    N_b = ops.clamp_min(N[None, :], _EPS)
    D_b = ops.clamp_min(D[None, :], _EPS)

    log_U = xp.log(ops.clamp_min(U_b, _EPS))
    log_D = xp.log(ops.clamp_min(D_b, _EPS))
    log_N = xp.log(ops.clamp_min(N_b, _EPS))

    D_eff = (U_b ** gamma[:, None]) * (D_b ** (1.0 - gamma[:, None]))
    D_eff = ops.clamp_min(D_eff, _EPS)

    log_D_eff = xp.log(ops.clamp_min(D_eff, _EPS))

    N_neg_alpha = N_b ** (-alpha[:, None])      # (B, M)
    D_eff_neg_beta = D_eff ** (-beta[:, None])  # (B, M)

    termN = A[:, None] * N_neg_alpha
    termD = Bcoef[:, None] * D_eff_neg_beta

    pred = termN + termD + E[:, None]

    # --- Jacobian ---
    ones = pred * 0.0 + 1.0

    # ∂/∂A = N^(-alpha)
    d_A = N_neg_alpha

    # ∂/∂alpha = -termN * log(N)
    d_alpha = -termN * log_N

    # ∂/∂B = D_eff^(-beta)
    d_B = D_eff_neg_beta

    # ∂/∂beta = -termD * log(D_eff)
    d_beta = -termD * log_D_eff

    # ∂/∂E = 1
    d_E = ones

    # ∂/∂gamma: D_eff = U^gamma * D^(1-gamma)
    #   log(D_eff) = gamma*log(U) + (1-gamma)*log(D)
    #   ∂log(D_eff)/∂gamma = log(U) - log(D) = log(U/D)
    #   ∂D_eff/∂gamma = D_eff * (log(U) - log(D))
    # ∂pred/∂gamma = ∂pred/∂D_eff * ∂D_eff/∂gamma
    # ∂pred/∂D_eff = -beta * B * D_eff^(-beta-1) = -beta * termD / D_eff
    dpred_dDeff = -beta[:, None] * termD / D_eff
    dDeff_dgamma = D_eff * (log_U - log_D)
    d_gamma = dpred_dDeff * dDeff_dgamma

    # order: [A, alpha, B, beta, E, gamma]
    jac = ops.stack([d_A, d_alpha, d_B, d_beta, d_E, d_gamma], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac

# Scaling law 6 (8p): Chinchilla + repeat-penalty factor
#   R = D / U; factor = 1 + C * max(R - 1, 0)^c * N^d
#   D_eff = D / factor
#   loss = E + A * N^(-alpha) + B * D_eff^(-beta)
# theta: [E, A, alpha, B, beta, C, c, d]
def sl_6(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    U, N, D = X[:, 0], X[:, 1], X[:, 2]
    Ep = theta[:, 0]
    A = theta[:, 1]
    alpha = theta[:, 2]
    Bcoef = theta[:, 3]
    beta = theta[:, 4]
    Cc = theta[:, 5]
    c = theta[:, 6]
    d = theta[:, 7]

    U_b = ops.clamp_min(U[None, :], _EPS)
    N_b = ops.clamp_min(N[None, :], _EPS)
    D_b = ops.clamp_min(D[None, :], _EPS)

    log_N = xp.log(ops.clamp_min(N_b, _EPS))

    R = D_b / U_b                                # (1, M)
    repeat_excess = ops.clamp_min(R - 1.0, 0.0)  # (1, M)
    log_re = xp.log(ops.clamp_min(repeat_excess, _EPS))

    re_c = repeat_excess ** c[:, None]  # (B, M)
    N_d = N_b ** d[:, None]             # (B, M)
    penalty = Cc[:, None] * re_c * N_d  # (B, M)
    factor = 1.0 + penalty              # (B, M)
    factor_safe = ops.clamp_min(factor, _EPS)

    D_eff = D_b / factor_safe  # (B, M)
    D_eff = ops.clamp_min(D_eff, _EPS)

    log_D_eff = xp.log(ops.clamp_min(D_eff, _EPS))

    N_neg_alpha = N_b ** (-alpha[:, None])      # (B, M)
    D_eff_neg_beta = D_eff ** (-beta[:, None])  # (B, M)

    termN = A[:, None] * N_neg_alpha         # (B, M)
    termD = Bcoef[:, None] * D_eff_neg_beta  # (B, M)

    pred = Ep[:, None] + termN + termD

    # --- Jacobian ---
    ones = pred * 0.0 + 1.0

    # ∂/∂E = 1
    d_E = ones

    # ∂/∂A = N^(-alpha)
    d_A = N_neg_alpha

    # ∂/∂alpha = -termN * log(N)
    d_alpha = -termN * log_N

    # ∂/∂B = D_eff^(-beta)
    d_B = D_eff_neg_beta

    # ∂/∂beta = -termD * log(D_eff)
    d_beta = -termD * log_D_eff

    # For C, c, d: need ∂pred/∂D_eff * ∂D_eff/∂factor * ∂factor/∂param
    # ∂pred/∂D_eff = -beta * B * D_eff^(-beta-1) = -beta * termD / D_eff
    dpred_dDeff = -beta[:, None] * termD / D_eff  # (B, M)

    # D_eff = D / factor => ∂D_eff/∂factor = -D / factor^2 = -D_eff / factor
    dDeff_dfactor = -D_eff / factor_safe  # (B, M)

    dpred_dfactor = dpred_dDeff * dDeff_dfactor  # (B, M)

    # factor = 1 + C * re^c * N^d
    # ∂factor/∂C = re^c * N^d = penalty / Cc[:, None]
    # But Cc could be 0, so compute directly:
    dfactor_dC = re_c * N_d  # (B, M)

    # ∂factor/∂c = C * re^c * log(re) * N^d = penalty * log(re)
    dfactor_dc = penalty * log_re  # (B, M)

    # ∂factor/∂d = C * re^c * N^d * log(N) = penalty * log(N)
    dfactor_dd = penalty * log_N  # (B, M)

    d_Cc = dpred_dfactor * dfactor_dC
    d_c = dpred_dfactor * dfactor_dc
    d_d = dpred_dfactor * dfactor_dd

    # order: [E, A, alpha, B, beta, C, c, d]
    jac = ops.stack([d_E, d_A, d_alpha, d_B, d_beta, d_Cc, d_c, d_d], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# Scaling law 7 (7p): Multiplicative (N*U) product + additive terms
# loss = L0 + A * (N * U)^alpha_pu + B * D^alpha_t + C * N^alpha_p
# theta: [L0, A, alpha_pu, B, alpha_t, C, alpha_p]
def sl_7(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    U, N, D = X[:, 0], X[:, 1], X[:, 2]
    L0 = theta[:, 0]
    A = theta[:, 1]
    alpha_pu = theta[:, 2]
    Bcoef = theta[:, 3]
    alpha_t = theta[:, 4]
    Cc = theta[:, 5]
    alpha_p = theta[:, 6]

    U_b = ops.clamp_min(U[None, :], _EPS)
    N_b = ops.clamp_min(N[None, :], _EPS)
    D_b = ops.clamp_min(D[None, :], _EPS)
    NU = ops.clamp_min(N_b * U_b, _EPS)

    log_NU = xp.log(ops.clamp_min(NU, _EPS))
    log_D = xp.log(ops.clamp_min(D_b, _EPS))
    log_N = xp.log(ops.clamp_min(N_b, _EPS))

    NU_apu = NU ** alpha_pu[:, None]  # (B, M)
    D_at = D_b ** alpha_t[:, None]    # (B, M)
    N_ap = N_b ** alpha_p[:, None]    # (B, M)

    term2 = A[:, None] * NU_apu
    term3 = Bcoef[:, None] * D_at
    term4 = Cc[:, None] * N_ap

    pred = L0[:, None] + term2 + term3 + term4

    # --- Jacobian ---
    ones = pred * 0.0 + 1.0

    d_L0 = ones
    d_A = NU_apu
    d_alpha_pu = term2 * log_NU
    d_B = D_at
    d_alpha_t = term3 * log_D
    d_C = N_ap
    d_alpha_p = term4 * log_N

    # order: [L0, A, alpha_pu, B, alpha_t, C, alpha_p]
    jac = ops.stack([d_L0, d_A, d_alpha_pu, d_B, d_alpha_t, d_C, d_alpha_p], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# Scaling law 8 (7p): Log-ratio vocabulary saturation
#   vocab_ratio = log(U / D + 1)
#   loss = a + b / D^alpha + c / N^beta + d * |vocab_ratio|^gamma
# theta: [a, b, c, d, alpha, beta, gamma]
def sl_8(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    U, N, D = X[:, 0], X[:, 1], X[:, 2]
    a0 = theta[:, 0]
    b0 = theta[:, 1]
    c0 = theta[:, 2]
    d0 = theta[:, 3]
    alpha = theta[:, 4]
    beta = theta[:, 5]
    gamma = theta[:, 6]

    U_b = ops.clamp_min(U[None, :], _EPS)
    N_b = ops.clamp_min(N[None, :], _EPS)
    D_b = ops.clamp_min(D[None, :], _EPS)

    log_D = xp.log(ops.clamp_min(D_b, _EPS))
    log_N = xp.log(ops.clamp_min(N_b, _EPS))

    D_neg_alpha = D_b ** (-alpha[:, None])  # (B, M)
    N_neg_beta = N_b ** (-beta[:, None])    # (B, M)

    termD = b0[:, None] * D_neg_alpha  # b/D^alpha
    termN = c0[:, None] * N_neg_beta   # c/N^beta

    vocab_ratio = xp.log(U_b / D_b + 1.0)  # (1, M) or (B, M)
    abs_vr = ops.clamp_min(xp.abs(vocab_ratio) if hasattr(xp, 'abs') else ops.maximum(vocab_ratio, -vocab_ratio), _EPS)
    log_abs_vr = xp.log(ops.clamp_min(abs_vr, _EPS))

    abs_vr_gamma = abs_vr ** gamma[:, None]  # (B, M)
    termV = d0[:, None] * abs_vr_gamma       # d*|vr|^gamma

    pred = a0[:, None] + termD + termN + termV

    # --- Jacobian ---
    ones = pred * 0.0 + 1.0

    d_a = ones
    d_b = D_neg_alpha             # ∂/∂b = 1/D^alpha
    d_c = N_neg_beta              # ∂/∂c = 1/N^beta
    d_d = abs_vr_gamma            # ∂/∂d = |vr|^gamma
    d_alpha = -termD * log_D      # ∂/∂alpha = -b*D^(-alpha)*log(D)
    d_beta = -termN * log_N       # ∂/∂beta = -c*N^(-beta)*log(N)
    d_gamma = termV * log_abs_vr  # ∂/∂gamma = d*|vr|^gamma*log(|vr|)

    # order: [a, b, c, d, alpha, beta, gamma]
    jac = ops.stack([d_a, d_b, d_c, d_d, d_alpha, d_beta, d_gamma], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# Scaling law 9 (7p): Multiplicative data-quality modulation
# loss = A / N^alpha + B / D^beta * (1 + C / U^gamma) + L_inf
# theta: [A, alpha, B, beta, C, gamma, L_inf]
def sl_9(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    U, N, D = X[:, 0], X[:, 1], X[:, 2]
    A = theta[:, 0]
    alpha = theta[:, 1]
    Bcoef = theta[:, 2]
    beta = theta[:, 3]
    Cc = theta[:, 4]
    gamma = theta[:, 5]
    L_inf = theta[:, 6]

    U_b = ops.clamp_min(U[None, :], _EPS)
    N_b = ops.clamp_min(N[None, :], _EPS)
    D_b = ops.clamp_min(D[None, :], _EPS)

    log_N = xp.log(ops.clamp_min(N_b, _EPS))
    log_D = xp.log(ops.clamp_min(D_b, _EPS))
    log_U = xp.log(ops.clamp_min(U_b, _EPS))

    N_neg_alpha = N_b ** (-alpha[:, None])  # (B, M)
    D_neg_beta = D_b ** (-beta[:, None])    # (B, M)
    U_neg_gamma = U_b ** (-gamma[:, None])  # (B, M)

    termN = A[:, None] * N_neg_alpha           # A/N^alpha
    quality = 1.0 + Cc[:, None] * U_neg_gamma  # 1 + C/U^gamma
    data_base = Bcoef[:, None] * D_neg_beta    # B/D^beta
    data_term = data_base * quality            # B/D^beta * (1 + C/U^gamma)

    pred = termN + data_term + L_inf[:, None]

    # --- Jacobian ---
    ones = pred * 0.0 + 1.0

    # ∂/∂A = N^(-alpha)
    d_A = N_neg_alpha

    # ∂/∂alpha = -termN * log(N)
    d_alpha = -termN * log_N

    # ∂/∂B = D^(-beta) * quality
    d_B = D_neg_beta * quality

    # ∂/∂beta = -data_term * log(D)
    d_beta = -data_term * log_D

    # ∂/∂C = B/D^beta * U^(-gamma) = data_base * U^(-gamma)
    d_C = data_base * U_neg_gamma

    # ∂/∂gamma = B/D^beta * C * (-U^(-gamma)) * log(U)
    #          = -data_base * C * U^(-gamma) * log(U)
    d_gamma = -data_base * Cc[:, None] * U_neg_gamma * log_U

    # ∂/∂L_inf = 1
    d_Linf = ones

    # order: [A, alpha, B, beta, C, gamma, L_inf]
    jac = ops.stack([d_A, d_alpha, d_B, d_beta, d_C, d_gamma, d_Linf], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# Scaling law 10 (7p): Generalized mean (Lq-norm) data term
# loss = L0 + A * N^(-a) + B * (D^(-b*q) + (k*U)^(-b*q))^(1/q)
# theta: [L0, A, B, a, b, k, q]
def sl_10(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    U, N, D = X[:, 0], X[:, 1], X[:, 2]
    L0 = theta[:, 0]
    A = theta[:, 1]
    Bcoef = theta[:, 2]
    a = theta[:, 3]
    b = theta[:, 4]
    k = theta[:, 5]
    q = theta[:, 6]

    U_b = ops.clamp_min(U[None, :], _EPS)
    N_b = ops.clamp_min(N[None, :], _EPS)
    D_b = ops.clamp_min(D[None, :], _EPS)

    log_N = xp.log(ops.clamp_min(N_b, _EPS))
    log_D = xp.log(ops.clamp_min(D_b, _EPS))

    q_safe = ops.clamp_min(q, _EPS)
    bq = b[:, None] * q_safe[:, None]  # (B, M-broadcast) -> (B, 1)

    t_D = D_b ** (-bq)                          # (B, M)
    kU = ops.clamp_min(k[:, None] * U_b, _EPS)  # (B, M)
    log_kU = xp.log(ops.clamp_min(kU, _EPS))
    t_U = kU ** (-bq)                           # (B, M)

    S = t_D + t_U  # (B, M)
    S_safe = ops.clamp_min(S, _EPS)
    log_S = xp.log(ops.clamp_min(S_safe, _EPS))

    inv_q = 1.0 / q_safe[:, None]  # (B, 1)
    gm = S_safe ** inv_q           # (B, M)

    N_neg_a = N_b ** (-a[:, None])  # (B, M)

    termN = A[:, None] * N_neg_a  # (B, M)
    termG = Bcoef[:, None] * gm   # (B, M)

    pred = L0[:, None] + termN + termG

    # --- Jacobian ---
    ones = pred * 0.0 + 1.0

    # ∂/∂L0 = 1
    d_L0 = ones

    # ∂/∂A = N^(-a)
    d_A = N_neg_a

    # ∂/∂B = gm
    d_B = gm

    # ∂/∂a = -termN * log(N)
    d_a = -termN * log_N

    # For b, k, q we need derivatives through gm = S^(1/q).
    # ∂gm/∂S: gm = S^(1/q) => ∂gm/∂S = (1/q) * S^(1/q - 1) = gm / (q * S)
    dgm_dS = gm / (q_safe[:, None] * S_safe)  # (B, M)

    # ∂S/∂b: S = D^(-bq) + (kU)^(-bq)
    #   t_D = D^(-bq),    ∂t_D/∂b = -q * t_D * log(D)   (since ∂(-bq)/∂b = -q)
    #   t_U = (kU)^(-bq), ∂t_U/∂b = -q * t_U * log(kU)
    dS_db = -q_safe[:, None] * (t_D * log_D + t_U * log_kU)  # (B, M)

    # ∂gm/∂b = dgm_dS * dS_db (gm depends on b only through S)
    dgm_db = dgm_dS * dS_db
    d_b = Bcoef[:, None] * dgm_db  # (B, M)

    # ∂S/∂k: t_U = (kU)^(-bq)
    #   ∂t_U/∂k = -bq * (kU)^(-bq-1) * U = -bq * t_U / (kU) * U = -bq * t_U / k[:, None]
    #   (since kU = k*U, ∂(kU)/∂k = U, and ∂t_U/∂(kU) = -bq * (kU)^(-bq-1))
    dS_dk = -bq * t_U / k[:, None]  # (B, M)
    dgm_dk = dgm_dS * dS_dk
    d_k = Bcoef[:, None] * dgm_dk  # (B, M)

    # ∂gm/∂q: gm = S^(1/q), where both S and the exponent 1/q depend on q.
    # Use: log(gm) = (1/q) * log(S)
    #   ∂log(gm)/∂q = ∂(1/q)/∂q * log(S) + (1/q) * ∂log(S)/∂q
    #               = (-1/q^2) * log(S) + (1/q) * (1/S) * ∂S/∂q
    #   ∂gm/∂q = gm * [(-1/q^2)*log(S) + (1/(q*S))*∂S/∂q]
    #
    # ∂S/∂q: t_D = D^(-bq)    => ∂t_D/∂q = -b * t_D * log(D)
    #        t_U = (kU)^(-bq) => ∂t_U/∂q = -b * t_U * log(kU)
    dS_dq = -b[:, None] * (t_D * log_D + t_U * log_kU)  # (B, M)

    q2 = q_safe[:, None] ** 2
    dgm_dq = gm * (-log_S / q2 + dS_dq / (q_safe[:, None] * S_safe))
    d_q = Bcoef[:, None] * dgm_dq  # (B, M)

    # order: [L0, A, B, a, b, k, q]
    jac = ops.stack([d_L0, d_A, d_B, d_a, d_b, d_k, d_q], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


PARAM_BOUNDS = {
    # Dataset: U (unique_tokens) ~ 1e8–2e11, N (params) ~ 7e6–9e9,
    # D (tokens) ~ 1e8–9e11, Loss ~ 2.3–8.1
    #
    # Bound derivation:
    # - Decay exponents (alpha, beta, etc.): (0.05, 2.0) — physically positive,
    #   upper limit avoids numerically useless landscape regions.
    # - Mixed/signed exponents (gamma, delta, alpha_pu, …): tight range around
    #   observed optimal, typically (-2, 0.5) for negative-decay parameters.
    # - Coefficients for N-decay terms A/N^alpha:
    #   A_max ≈ L_max * N_min^alpha_max = 8 * (7e6)^2 ≈ 4e14 → use 1e9 with
    #   alpha restricted to ≤ 2 (optimizer stays near typical alpha ≈ 0.3–1.0).
    # - Loss-floor constants: (-3, 10) — loss ∈ [2.3, 8.1], floor < total loss.
    # - Structural params (tau, rho, s, k, q): derived from data ratios / physics.
    #
    # Overflow: all float64-safe; exp(-R/tau) underflows to 0 (never NaN);
    # power terms at extreme bounds are O(1e50) at worst, well below 1e308.

    # sl_1: [A, alpha, B, beta, E, gamma, delta]
    # A/N^alpha + B/D^beta + E*(U^gamma * N^delta)
    # Optimal approx: A~5e5 (alpha~0.82), B~1e5 (beta~0.56), E~12, gamma~-0.04, delta~-0.03
    # E*U^gamma*N^delta is a small "repeat-floor" term; restrict |gamma|,|delta|<=0.5
    # so the floor stays O(1–100) and E doesn't need astronomically large values.
    "sl_1": [(1e-3, 1e9), (0.05, 2.0), (1e-3, 1e8), (0.05, 2.0),
             (-100, 200), (-0.5, 0.5), (-0.5, 0.5)],

    # sl_2: [a, b, c, d, p, q, r]
    # a + b*U^p + c*N^q + d*D^r
    # Optimal approx: a~2, b~17–700, c~3400–5300, d~1e5, p~-0.14 to -0.38,
    # q~-0.48 to -0.51, r~-0.56 to -0.57
    # Coefficients: b*U^p ~ O(1) at U~1e9, p~-0.4 → b ~ 3/U^(-0.4) ~ 3e4 max;
    # similarly c ~ 3e5, d ~ 1e6. Use 10x margin.
    "sl_2": [(-3, 10), (-1e6, 1e6), (-1e6, 1e6), (-1e7, 1e7),
             (-1.5, 0.5), (-1.5, 0.5), (-1.5, 0.5)],

    # sl_3: [A, tau_N, B, tau_D, alpha, C, rho]
    # Muennighoff data-constrained formula with effective N/D via exponential saturation.
    # Optimal approx: A~2345, tau_N~0.07, B~14500, tau_D~31, alpha~0.45, C~2.3, rho~0.82
    # tau: dimensionless repeat counts; tau_N can be very small (<<1) or up to O(100).
    # rho: fraction U/N at crossover; rho*U_D is compared to N, range from 0.001 to 100.
    "sl_3": [(1e-3, 1e7), (1e-3, 500), (1e-3, 1e7), (1e-3, 500),
             (0.05, 2.0), (-3, 10), (1e-3, 100)],

    # sl_4: [L0, A, a, B, b, s, d]
    # L0 + A*M_n^(-a) + B*T_eff_n^(-b), T_eff_n = T_n/(1+T_n/(s*U_n*M_n^d))
    # _M_REF=_T_REF=_U_REF=1 so M_n=N~7e6–9e9, T_n=D~1e8–9e11, U_n=U~1e8–2e11.
    # Optimal approx: L0~2.5, A~4e6 (a~0.92), B~12500 (b~0.44), s~197, d~-0.13
    # s*U_n*M_n^d ~ D at crossover; s ~ D/(U*N^d) ~ 1e10/(4e9*(2.5e8)^(-0.13)) ~ 200.
    "sl_4": [(-3, 8), (1e-3, 1e11), (0.05, 2.5), (1e-3, 1e9),
             (0.05, 2.5), (1e-5, 1e7), (-2.0, 1.5)],

    # sl_5: [A, alpha, B, beta, E, gamma]
    # A/N^alpha + B/D_eff^beta + E, D_eff = U^gamma * D^(1-gamma)
    # gamma in [0,1]: D_eff is a geometric mean of U and D (U^gamma*D^(1-gamma)).
    # Optimal approx: A~173 (alpha~0.28), B~1.7e6 (beta~0.71), E~2.3, gamma~0.34
    # D_eff at gamma=0.34: D_eff ~ (4e9)^0.34*(1e10)^0.66 ~ 7e9; B/D_eff^0.71 ~ O(1).
    "sl_5": [(1e-3, 1e10), (0.05, 2.0), (1e-3, 1e10), (0.05, 2.0),
             (-3, 10), (0.0, 1.0)],

    # sl_6: [E, A, alpha, B, beta, C, c, d]
    # E + A*N^(-alpha) + B*D_eff^(-beta),
    # D_eff = D / max(1 + C*(max(D/U-1,0))^c * N^d, eps)
    # Optimal approx: E~3, A~1.6e6 (alpha~0.86), B~7e8 (beta~1.04), C~0.3, c~0.83, d~0.02
    # D/U-1 ranges 0–8999; factor = 1+C*(8999)^c*N^d; with C=0.3,c=0.83,d=0.02 → factor~700.
    # C>=0 (repeat penalty must increase factor); c in [0,2]; d in [-1,1].
    "sl_6": [(-3, 10), (1e-3, 1e11), (0.05, 2.0), (1e-3, 1e12),
             (0.05, 2.0), (0, 1e4), (0, 2.0), (-1.0, 1.0)],

    # sl_7: [L0, A, alpha_pu, B, alpha_t, C, alpha_p]
    # L0 + A*(N*U)^alpha_pu + B*D^alpha_t + C*N^alpha_p
    # N*U range: ~7e14–1.5e21; exponents alpha_pu, alpha_t, alpha_p are negative (decay).
    # Optimal approx: L0~2.4, A~1100 (alpha_pu~-0.18), B~95000 (alpha_t~-0.55),
    # C~7e11 (alpha_p~-1.77, nearly zero contribution at typical N).
    "sl_7": [(-3, 10), (1e-3, 1e9), (-2.0, 0.5), (1e-3, 1e9),
             (-2.0, 0.5), (-1e13, 1e13), (-2.5, 0.5)],

    # sl_8: [a, b, c, d, alpha, beta, gamma]
    # a + b/D^alpha + c/N^beta + d*|log(U/D+1)|^gamma
    # vocab_ratio = log(U/D+1) in [0, ~7.5] over this dataset.
    # WARNING: as gamma->0, |vr|^gamma->1, making a and d unidentifiable (a+d = const).
    # Restrict gamma >= 0.05 to reduce degeneracy; fix a in (-3, 10).
    # Optimal approx (non-degenerate): a~3.5–3.9, b~8000–9600, c~6400–9000,
    # d~-1.7, alpha~0.42, beta~0.54, gamma~0.1–0.4.
    "sl_8": [(-3, 10), (-1e8, 1e8), (-1e8, 1e8), (-200, 200),
             (0.05, 2.0), (0.05, 2.0), (0.05, 5.0)],

    # sl_9: [A, alpha, B, beta, C, gamma, L_inf]
    # A/N^alpha + B/D^beta * (1 + C/U^gamma) + L_inf
    # Optimal approx: A~90 (alpha~0.22), B~16000 (beta~0.47),
    # C~1.1e9 (gamma~1.20), L_inf~2.0
    # C/U^gamma at U_typ=4e9, gamma=1.2: C/(4e9)^1.2 ~ 1.1e9/2.4e10 ~ 0.046 (small factor).
    # C can be large because U is also large; allow up to 1e11.
    "sl_9": [(1e-3, 1e7), (0.05, 2.0), (1e-3, 1e7), (0.05, 2.0),
```
|
| 922 |
+
(0, 1e11), (0.05, 2.0), (-3, 10)],
|
| 923 |
+
|
| 924 |
+
# sl_10: [L0, A, B, a, b, k, q]
|
| 925 |
+
# L0 + A*N^(-a) + B*(D^(-b*q) + (k*U)^(-b*q))^(1/q)
|
| 926 |
+
# q: generalized-mean exponent; k: scales U to match D in the Lq-norm.
|
| 927 |
+
# Optimal approx: L0~2.3, A~14500 (a~0.63), B~1900 (b~0.37), k~23, q~12–16.
|
| 928 |
+
# k*U_typ=23*4e9=9e10 ~ D_typ=1e10 (same order, reasonable crossover).
|
| 929 |
+
# q can be large (O(10–20)) meaning the law approaches a hard min over D and k*U.
|
| 930 |
+
"sl_10": [(-3, 8), (1e-3, 1e9), (1e-3, 1e9), (0.05, 2.0),
|
| 931 |
+
(0.05, 2.0), (1e-3, 1e4), (0.1, 50.0)],
|
| 932 |
+
}
|
| 933 |
+
|
| 934 |
+
LAW_REGISTRY = {
|
| 935 |
+
"sl_1": sl_1, "sl_2": sl_2, "sl_3": sl_3, "sl_4": sl_4, "sl_5": sl_5,
|
| 936 |
+
"sl_6": sl_6, "sl_7": sl_7, "sl_8": sl_8, "sl_9": sl_9, "sl_10": sl_10,
|
| 937 |
+
}
|
| 938 |
+
PARAM_COUNTS = {
|
| 939 |
+
"sl_1": 7, "sl_2": 7, "sl_3": 7, "sl_4": 7, "sl_5": 6,
|
| 940 |
+
"sl_6": 8, "sl_7": 7, "sl_8": 7, "sl_9": 7, "sl_10": 7,
|
| 941 |
+
}
|
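The generalized-mean derivative above is easy to get wrong, so here is a quick standalone check of the same `∂gm/∂q` identity against a central finite difference (pure NumPy; the scalar values for `D`, `U`, `b`, `k`, `q` are illustrative stand-ins near the quoted optima, not fitted parameters):

```python
import numpy as np

def gm(D, U, b, k, q):
    # S = D^(-b*q) + (k*U)^(-b*q);  gm = S^(1/q)
    S = D ** (-b * q) + (k * U) ** (-b * q)
    return S ** (1.0 / q)

D, U, b, k, q = 1e10, 4e9, 0.37, 23.0, 12.0
S = D ** (-b * q) + (k * U) ** (-b * q)
# Analytic form from the log-derivative identity in the comments above.
dS_dq = -b * (D ** (-b * q) * np.log(D) + (k * U) ** (-b * q) * np.log(k * U))
analytic = gm(D, U, b, k, q) * (-np.log(S) / q ** 2 + dS_dq / (q * S))
numeric = (gm(D, U, b, k, q + 1e-6) - gm(D, U, b, k, q - 1e-6)) / 2e-6
print(analytic, numeric)  # the two should agree to high precision
```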
data_constrained_scaling_law/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:acdc55ff3bffb3020b365007d2b18bc0bf0ec3e359b2ceb3e09abe2bbb94cc2e
size 2736
data_constrained_scaling_law/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d0e6f059e657c7476f8f2636110e391d086c78bd826a50c4cc62f06e73c7b020
size 4589
domain_mixture_scaling_law/laws.py
ADDED
@@ -0,0 +1,654 @@
"""Scaling laws for domain mixture proportions (multi-output).

X columns: [proportion_domain_1..5]
Output: [loss_domain_1..5]
"""

from typing import Literal

import benchmark.dataset.utils as utils

_EPS = 1e-12
_NUM_DOMAINS = 5


def _squeeze(pred, jac, B):
    if B == 1:
        return pred[0], jac[0]
    return pred, jac


def _assign(arr, backend, idx, val):
    """Assign val to arr at index idx, handling jax immutability."""
    if backend == "jax":
        return arr.at[idx].set(val)
    arr[idx] = val
    return arr

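# Note on the jax branch: JAX arrays are immutable, so item assignment has
# to go through the functional update API, which returns a new array:
#
#     a = jnp.zeros(3)
#     a = a.at[1].set(5.0)   # NumPy/torch equivalent: a[1] = 5.0, in place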

# sl_1 (30p): loss_i = a_i + b_i*log(p_i+eps) + sum_{j!=i} c_{ij}*p_j
# Per domain: a_i(1) + b_i(1) + c_{ij}(4) = 6 -> 30 total
def sl_1(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    B, M = theta.shape[0], X.shape[0]
    P = 30
    if backend == "torch":
        out = xp.zeros((B, M, _NUM_DOMAINS), dtype=xp.float64)
        jac = xp.zeros((B, M, _NUM_DOMAINS, P), dtype=xp.float64)
    else:
        out = xp.zeros((B, M, _NUM_DOMAINS))
        jac = xp.zeros((B, M, _NUM_DOMAINS, P))
    ones_BM = xp.ones((B, M)) if backend != "torch" else xp.ones((B, M), dtype=xp.float64)
    offset = 0
    for i in range(_NUM_DOMAINS):
        a_i = theta[:, offset]
        b_i = theta[:, offset + 1]
        c_ij = theta[:, offset + 2: offset + 6]
        p_i = ops.clamp_min(X[:, i], _EPS)
        log_pi = xp.log(p_i)  # (M,)
        val = a_i[:, None] + b_i[:, None] * log_pi[None, :]
        j_indices = [j for j in range(_NUM_DOMAINS) if j != i]
        for k, j in enumerate(j_indices):
            val = val + c_ij[:, k:k+1] * X[None, :, j]
        out = _assign(out, backend, (slice(None), slice(None), i), val)
        # Jacobian
        # d/d a_i = 1
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset), ones_BM)
        # d/d b_i = log(p_i)
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 1),
                      log_pi[None, :] * ones_BM)
        # d/d c_ij = p_j
        for k, j in enumerate(j_indices):
            jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 2 + k),
                          X[None, :, j] * ones_BM)
        offset += 6
    return _squeeze(out, jac, B)


# sl_2 (35p): loss_i = A_i*(p_i+eps_i)^(-alpha_i)*exp(sum_j w_{ij}*p_j)
# Per domain: A(1)+eps(1)+alpha(1)+w(4 cross)=7 -> 35 total
def sl_2(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    B, M = theta.shape[0], X.shape[0]
    P = 35
    if backend == "torch":
        out = xp.zeros((B, M, _NUM_DOMAINS), dtype=xp.float64)
        jac = xp.zeros((B, M, _NUM_DOMAINS, P), dtype=xp.float64)
    else:
        out = xp.zeros((B, M, _NUM_DOMAINS))
        jac = xp.zeros((B, M, _NUM_DOMAINS, P))
    offset = 0
    for i in range(_NUM_DOMAINS):
        A_i = theta[:, offset]
        eps_i = theta[:, offset + 1]
        alpha_i = theta[:, offset + 2]
        w_ij = theta[:, offset + 3: offset + 7]
        p_i = ops.clamp_min(X[:, i] + eps_i[:, None], _EPS)  # (B, M)
        power_term = A_i[:, None] * (p_i ** (-alpha_i[:, None]))  # (B, M)
        j_indices = [j for j in range(_NUM_DOMAINS) if j != i]
        interaction = xp.zeros((B, M)) if backend != "torch" else xp.zeros((B, M), dtype=xp.float64)
        for k, j in enumerate(j_indices):
            interaction = interaction + w_ij[:, k:k+1] * X[None, :, j]
        interaction = ops.clamp(interaction, min=-20.0, max=20.0)
        exp_inter = ops.exp(interaction)  # (B, M)
        val = power_term * exp_inter  # (B, M)
        out = _assign(out, backend, (slice(None), slice(None), i), val)

        # Jacobian: val = A_i * (p_i+eps_i)^(-alpha_i) * exp(interaction)
        # d/d A_i = (p_i+eps_i)^(-alpha_i) * exp(interaction)
        # (computed directly rather than as val / A_i, which breaks down
        # when A_i crosses zero)
        d_A = (p_i ** (-alpha_i[:, None])) * exp_inter
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset), d_A)

        # d/d eps_i: chain through p_i = X[:,i] + eps_i
        # d(val)/d(eps_i) = A_i * (-alpha_i) * p_i^(-alpha_i - 1) * 1 * exp(inter)
        #                 = val * (-alpha_i) / p_i
        d_eps = val * (-alpha_i[:, None]) / p_i
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 1), d_eps)

        # d/d alpha_i: d/d(alpha) of p_i^(-alpha) = -log(p_i) * p_i^(-alpha)
        # d(val)/d(alpha_i) = A_i * (-log(p_i)) * p_i^(-alpha_i) * exp(inter)
        #                   = val * (-log(p_i))
        log_pi = xp.log(ops.clamp_min(p_i, _EPS))
        d_alpha = val * (-log_pi)
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 2), d_alpha)

        # d/d w_ij[k]: d(val)/d(w_k) = val * p_j (from exp derivative)
        for k, j in enumerate(j_indices):
            d_w = val * X[None, :, j]
            jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 3 + k), d_w)

        offset += 7
    return _squeeze(out, jac, B)


# sl_3 (35p): loss_i = base_i + coeff_i*p_i^exp_i + sum_{j!=i} W_{ij}*p_j
# Power law self + full linear cross (5 base + 5 coeff + 5 exp + 20 cross = 35)
# Repack: per domain i: base(1)+coeff(1)+exp(1)+W_{ij}(4)=7 -> 35
def sl_3(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    B, M = theta.shape[0], X.shape[0]
    P = 35
    if backend == "torch":
        import torch
        out = torch.zeros((B, M, _NUM_DOMAINS), dtype=torch.float64)
        jac = torch.zeros((B, M, _NUM_DOMAINS, P), dtype=torch.float64)
    else:
        import numpy as np
        out = np.zeros((B, M, _NUM_DOMAINS))
        jac = np.zeros((B, M, _NUM_DOMAINS, P))
    ones_BM = xp.ones((B, M)) if backend != "torch" else xp.ones((B, M), dtype=xp.float64)
    offset = 0
    for i in range(_NUM_DOMAINS):
        base_i = theta[:, offset]
        coeff_i = theta[:, offset + 1]
        exp_i = theta[:, offset + 2]
        W_ij = theta[:, offset + 3: offset + 7]
        p_i = ops.clamp_min(X[:, i], _EPS)
        p_i_pow = p_i[None, :] ** exp_i[:, None]  # (B, M)
        val = base_i[:, None] + coeff_i[:, None] * p_i_pow
        j_indices = [j for j in range(_NUM_DOMAINS) if j != i]
        for k, j in enumerate(j_indices):
            val = val + W_ij[:, k:k+1] * X[None, :, j]
        out[:, :, i] = val

        # Jacobian
        # d/d base_i = 1
        jac[:, :, i, offset] = ones_BM
        # d/d coeff_i = p_i^exp_i
        jac[:, :, i, offset + 1] = p_i_pow
        # d/d exp_i = coeff_i * p_i^exp_i * log(p_i)
        log_pi = xp.log(ops.clamp_min(p_i, _EPS))  # (M,)
        jac[:, :, i, offset + 2] = coeff_i[:, None] * p_i_pow * log_pi[None, :]
        # d/d W_ij[k] = p_j
        for k, j in enumerate(j_indices):
            jac[:, :, i, offset + 3 + k] = X[None, :, j] * ones_BM

        offset += 7
    if backend == "jax":
        import jax.numpy as jnp
        out = jnp.array(out)
        jac = jnp.array(jac)
    return _squeeze(out, jac, B)


# sl_4 (35p): loss_i = exp(sum_k C_{ik}*p_k^alpha_k + bias_i)
# Exponential of linear combo of power-transformed props
# 5 bias + 25 C + 5 alpha = 35
# Pack: 5 alpha first, then per domain: bias(1)+C(5)=6 -> 5+30=35
def sl_4(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    B, M = theta.shape[0], X.shape[0]
    P = 35
    # First 5 params: shared alpha exponents
    alphas = theta[:, :5]  # (B, 5)
    if backend == "torch":
        out = xp.zeros((B, M, _NUM_DOMAINS), dtype=xp.float64)
        jac = xp.zeros((B, M, _NUM_DOMAINS, P), dtype=xp.float64)
    else:
        out = xp.zeros((B, M, _NUM_DOMAINS))
        jac = xp.zeros((B, M, _NUM_DOMAINS, P))

    # Precompute p_k^alpha_k and log(p_k) for all k
    p_pow = []  # list of (B, M) arrays: p_k^alpha_k
    log_p = []  # list of (M,) arrays: log(p_k)
    for k in range(_NUM_DOMAINS):
        p_k = ops.clamp_min(X[:, k], _EPS)
        lp_k = xp.log(ops.clamp_min(p_k, _EPS))
        log_p.append(lp_k)
        p_pow.append(p_k[None, :] ** alphas[:, k:k+1])  # (B, M)

    offset = 5
    for i in range(_NUM_DOMAINS):
        bias_i = theta[:, offset]
        C_ik = theta[:, offset + 1: offset + 6]  # (B, 5)
        # sum_k C_ik * p_k^alpha_k
        lin = bias_i[:, None]  # (B, 1) -> broadcast to (B, M)
        for k in range(_NUM_DOMAINS):
            lin = lin + C_ik[:, k:k+1] * p_pow[k]
        lin = ops.clamp(lin, min=-50.0, max=50.0)
        val = ops.exp(lin)  # (B, M)
        out = _assign(out, backend, (slice(None), slice(None), i), val)

        # Jacobian: val = exp(lin)
        # d(val)/d(param) = val * d(lin)/d(param)

        # d/d alpha_k (shared, index k in 0..4):
        # d(lin)/d(alpha_k) = C_ik * p_k^alpha_k * log(p_k)
        for k in range(_NUM_DOMAINS):
            d_alpha_k = val * C_ik[:, k:k+1] * p_pow[k] * log_p[k][None, :]
            # Shared alpha slot k: jac is indexed by output dimension i, so
            # each domain writes its own row and a plain set (not add) is correct.
            if backend == "jax":
                jac = jac.at[:, :, i, k].set(d_alpha_k)
            else:
                jac[:, :, i, k] = d_alpha_k

        # d/d bias_i = val * 1
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset), val)

        # d/d C_ik = val * p_k^alpha_k
        for k in range(_NUM_DOMAINS):
            d_C = val * p_pow[k]
            jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 1 + k), d_C)

        offset += 6
    return _squeeze(out, jac, B)


# sl_5 (35p): loss_i = b_i + sum_j W_{ij} * p_j^alpha_j
# Full weight matrix on power-transformed proportions (shared alpha)
# 5 alpha + 5 bias + 25 W = 35
def sl_5(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    B, M = theta.shape[0], X.shape[0]
    P = 35
    alphas = theta[:, :5]  # (B, 5)
    if backend == "torch":
        import torch
        out = torch.zeros((B, M, _NUM_DOMAINS), dtype=torch.float64)
        jac = torch.zeros((B, M, _NUM_DOMAINS, P), dtype=torch.float64)
    else:
        import numpy as np
        out = np.zeros((B, M, _NUM_DOMAINS))
        jac = np.zeros((B, M, _NUM_DOMAINS, P))
    ones_BM = xp.ones((B, M)) if backend != "torch" else xp.ones((B, M), dtype=xp.float64)

    # Precompute p_j^alpha_j and log(p_j) for all j
    p_pow = []
    log_p = []
    for j in range(_NUM_DOMAINS):
        p_j = ops.clamp_min(X[:, j], _EPS)
        lp_j = xp.log(ops.clamp_min(p_j, _EPS))
        log_p.append(lp_j)
        p_pow.append(p_j[None, :] ** alphas[:, j:j+1])  # (B, M)

    offset = 5
    for i in range(_NUM_DOMAINS):
        b_i = theta[:, offset]
        W_ij = theta[:, offset + 1: offset + 6]  # (B, 5)
        val = b_i[:, None]
        for j in range(_NUM_DOMAINS):
            val = val + W_ij[:, j:j+1] * p_pow[j]
        out[:, :, i] = val

        # Jacobian
        # d/d alpha_j (shared, index j in 0..4):
        # d(val)/d(alpha_j) = W_ij * p_j^alpha_j * log(p_j)
        for j in range(_NUM_DOMAINS):
            d_alpha = W_ij[:, j:j+1] * p_pow[j] * log_p[j][None, :]
            jac[:, :, i, j] = d_alpha

        # d/d b_i = 1
        jac[:, :, i, offset] = ones_BM

        # d/d W_ij = p_j^alpha_j
        for j in range(_NUM_DOMAINS):
            jac[:, :, i, offset + 1 + j] = p_pow[j]

        offset += 6
    if backend == "jax":
        import jax.numpy as jnp
        out = jnp.array(out)
        jac = jnp.array(jac)
    return _squeeze(out, jac, B)


# sl_6 (35p): loss_i = C_i + A_i * (sum_j T_{ij}*p_j)^(-alpha_i)
# Effective-mixture power law
# Per domain: C(1)+A(1)+alpha(1)+T(4 cross)=7 -> 35
def sl_6(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    B, M = theta.shape[0], X.shape[0]
    P = 35
    if backend == "torch":
        import torch
        out = torch.zeros((B, M, _NUM_DOMAINS), dtype=torch.float64)
        jac = torch.zeros((B, M, _NUM_DOMAINS, P), dtype=torch.float64)
    else:
        import numpy as np
        out = np.zeros((B, M, _NUM_DOMAINS))
        jac = np.zeros((B, M, _NUM_DOMAINS, P))
    ones_BM = xp.ones((B, M)) if backend != "torch" else xp.ones((B, M), dtype=xp.float64)
    offset = 0
    for i in range(_NUM_DOMAINS):
        C_i = theta[:, offset]
        A_i = theta[:, offset + 1]
        alpha_i = theta[:, offset + 2]
        T_ij = theta[:, offset + 3: offset + 7]  # 4 weights for j != i
        # eff = p_i + sum_{j!=i} T_ij * p_j
        eff = X[None, :, i]  # (1, M), broadcasts to (B, M) below
        j_indices = [j for j in range(_NUM_DOMAINS) if j != i]
        for k, j in enumerate(j_indices):
            eff = eff + T_ij[:, k:k+1] * X[None, :, j]
        eff = ops.clamp_min(eff, _EPS)  # (B, M)
        eff_pow = eff ** (-alpha_i[:, None])  # (B, M)
        val = C_i[:, None] + A_i[:, None] * eff_pow
        out[:, :, i] = val

        # Jacobian
        # power_term = A_i * eff^(-alpha_i)
        power_term = A_i[:, None] * eff_pow  # (B, M)
        log_eff = xp.log(ops.clamp_min(eff, _EPS))

        # d/d C_i = 1
        jac[:, :, i, offset] = ones_BM
        # d/d A_i = eff^(-alpha_i)
        jac[:, :, i, offset + 1] = eff_pow
        # d/d alpha_i = A_i * eff^(-alpha_i) * (-log(eff)) = power_term * (-log(eff))
        jac[:, :, i, offset + 2] = power_term * (-log_eff)
        # d/d T_ij[k] = A_i * (-alpha_i) * eff^(-alpha_i - 1) * p_j
        #             = power_term * (-alpha_i) / eff * p_j
        for k, j in enumerate(j_indices):
            d_T = power_term * (-alpha_i[:, None]) / eff * X[None, :, j]
            jac[:, :, i, offset + 3 + k] = d_T

        offset += 7
    if backend == "jax":
        import jax.numpy as jnp
        out = jnp.array(out)
        jac = jnp.array(jac)
    return _squeeze(out, jac, B)


# sl_7 (40p): loss_i = intercept_i + sum_j (c_lin_{ij}*p_j + c_log_{ij}*log(p_j+eps))
# Concrete form: a_i + b_i*p_i + c_i*log(p_i) + sum_{j!=i}(d_{ij}*p_j + e_i*log(p_j))
# Per domain: a(1)+b(1)+c(1)+d(4)+e(1)=8 -> 40
def sl_7(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    B, M = theta.shape[0], X.shape[0]
    P = 40
    if backend == "torch":
        out = xp.zeros((B, M, _NUM_DOMAINS), dtype=xp.float64)
        jac = xp.zeros((B, M, _NUM_DOMAINS, P), dtype=xp.float64)
    else:
        out = xp.zeros((B, M, _NUM_DOMAINS))
        jac = xp.zeros((B, M, _NUM_DOMAINS, P))
    ones_BM = xp.ones((B, M)) if backend != "torch" else xp.ones((B, M), dtype=xp.float64)
    offset = 0
    for i in range(_NUM_DOMAINS):
        a_i = theta[:, offset]
        b_i = theta[:, offset + 1]
        c_i = theta[:, offset + 2]
        d_ij = theta[:, offset + 3: offset + 7]  # 4 cross-domain linear
        e_i = theta[:, offset + 7]  # shared cross-domain log coeff
        p_i = ops.clamp_min(X[:, i], _EPS)
        log_pi = xp.log(p_i)  # (M,)
        val = a_i[:, None] + b_i[:, None] * X[None, :, i] + c_i[:, None] * log_pi[None, :]
        j_indices = [j for j in range(_NUM_DOMAINS) if j != i]
        # Accumulate sum of log(p_j) for d/d e_i
        sum_log_pj = xp.zeros((M,)) if backend != "torch" else xp.zeros((M,), dtype=xp.float64)
        for k, j in enumerate(j_indices):
            p_j = ops.clamp_min(X[:, j], _EPS)
            log_pj = xp.log(p_j)  # (M,)
            val = val + d_ij[:, k:k+1] * X[None, :, j] + e_i[:, None] * log_pj[None, :]
            sum_log_pj = sum_log_pj + log_pj
        out = _assign(out, backend, (slice(None), slice(None), i), val)

        # Jacobian
        # d/d a_i = 1
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset), ones_BM)
        # d/d b_i = p_i (the raw X value; the clamp only guards the log term)
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 1),
                      X[None, :, i] * ones_BM)
        # d/d c_i = log(p_i)
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 2),
                      log_pi[None, :] * ones_BM)
        # d/d d_ij[k] = p_j
        for k, j in enumerate(j_indices):
            jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 3 + k),
                          X[None, :, j] * ones_BM)
        # d/d e_i = sum_{j!=i} log(p_j)
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 7),
                      sum_log_pj[None, :] * ones_BM)
        offset += 8
    return _squeeze(out, jac, B)


# sl_8 (15p): loss_i = c_i - a_i * p_i^b_i
# Single-domain power law (depends only on own proportion)
# Per domain: 3p -> 15
def sl_8(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    B, M = theta.shape[0], X.shape[0]
    P = 15
    if backend == "torch":
        import torch
        out = torch.zeros((B, M, _NUM_DOMAINS), dtype=torch.float64)
        jac = torch.zeros((B, M, _NUM_DOMAINS, P), dtype=torch.float64)
    else:
        import numpy as np
        out = np.zeros((B, M, _NUM_DOMAINS))
        jac = np.zeros((B, M, _NUM_DOMAINS, P))
    ones_BM = xp.ones((B, M)) if backend != "torch" else xp.ones((B, M), dtype=xp.float64)
    offset = 0
    for i in range(_NUM_DOMAINS):
        c_i = theta[:, offset]
        a_i = theta[:, offset + 1]
        b_i = theta[:, offset + 2]
        p_i = ops.clamp_min(X[:, i], _EPS)
        log_pi = xp.log(ops.clamp_min(p_i, _EPS))  # (M,)
        p_i_pow = p_i[None, :] ** b_i[:, None]  # (B, M)
        val = c_i[:, None] - a_i[:, None] * p_i_pow
        out[:, :, i] = val

        # Jacobian
        # d/d c_i = 1
        jac[:, :, i, offset] = ones_BM
        # d/d a_i = -p_i^b_i
        jac[:, :, i, offset + 1] = -p_i_pow
        # d/d b_i = -a_i * p_i^b_i * log(p_i)
        jac[:, :, i, offset + 2] = -a_i[:, None] * p_i_pow * log_pi[None, :]

        offset += 3
    if backend == "jax":
        import jax.numpy as jnp
        out = jnp.array(out)
        jac = jnp.array(jac)
    return _squeeze(out, jac, B)


# sl_9 (15p): loss_i = a_i + b_i*log(p_i+eps) + c_i*[log(p_i+eps)]^2
# Quadratic-in-log
# Per domain: 3p -> 15
def sl_9(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    B, M = theta.shape[0], X.shape[0]
    P = 15
    if backend == "torch":
        out = xp.zeros((B, M, _NUM_DOMAINS), dtype=xp.float64)
        jac = xp.zeros((B, M, _NUM_DOMAINS, P), dtype=xp.float64)
    else:
        out = xp.zeros((B, M, _NUM_DOMAINS))
        jac = xp.zeros((B, M, _NUM_DOMAINS, P))
    ones_BM = xp.ones((B, M)) if backend != "torch" else xp.ones((B, M), dtype=xp.float64)
    offset = 0
    for i in range(_NUM_DOMAINS):
        a_i = theta[:, offset]
        b_i = theta[:, offset + 1]
        c_i = theta[:, offset + 2]
        p_i = ops.clamp_min(X[:, i], _EPS)
        lp = xp.log(p_i)[None, :]  # (1, M)
        val = a_i[:, None] + b_i[:, None] * lp + c_i[:, None] * lp ** 2
        out = _assign(out, backend, (slice(None), slice(None), i), val)

        # Jacobian
        # d/d a_i = 1
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset), ones_BM)
        # d/d b_i = log(p_i)
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 1),
                      lp * ones_BM)
        # d/d c_i = [log(p_i)]^2
        jac = _assign(jac, backend, (slice(None), slice(None), i, offset + 2),
                      (lp ** 2) * ones_BM)

        offset += 3
    return _squeeze(out, jac, B)


# sl_10 (15p): loss_i = a_i + b_i / (p_i + eps_i)
# Reciprocal law
# Per domain: 3p -> 15
def sl_10(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    B, M = theta.shape[0], X.shape[0]
    P = 15
    if backend == "torch":
        import torch
        out = torch.zeros((B, M, _NUM_DOMAINS), dtype=torch.float64)
        jac = torch.zeros((B, M, _NUM_DOMAINS, P), dtype=torch.float64)
    else:
        import numpy as np
        out = np.zeros((B, M, _NUM_DOMAINS))
        jac = np.zeros((B, M, _NUM_DOMAINS, P))
    ones_BM = xp.ones((B, M)) if backend != "torch" else xp.ones((B, M), dtype=xp.float64)
    offset = 0
    for i in range(_NUM_DOMAINS):
        a_i = theta[:, offset]
        b_i = theta[:, offset + 1]
        eps_i = theta[:, offset + 2]
        denom = ops.clamp_min(X[None, :, i] + eps_i[:, None], _EPS)  # (B, M)
        val = a_i[:, None] + b_i[:, None] / denom
        out[:, :, i] = val

        # Jacobian
        # d/d a_i = 1
        jac[:, :, i, offset] = ones_BM
        # d/d b_i = 1 / (p_i + eps_i)
        jac[:, :, i, offset + 1] = 1.0 / denom
        # d/d eps_i = -b_i / (p_i + eps_i)^2
        jac[:, :, i, offset + 2] = -b_i[:, None] / (denom ** 2)

        offset += 3
    if backend == "jax":
        import jax.numpy as jnp
        out = jnp.array(out)
        jac = jnp.array(jac)
    return _squeeze(out, jac, B)


PARAM_BOUNDS = {
    # Dataset: p_i (proportions) in [0, 0.75], min nonzero ~0.03125,
    # log(p_i) in [-3.47, -0.29], loss_i in [1.21, 3.97] (per domain).
    #
    # Bound derivation:
    # - Loss-floor constants (a_i, base_i, C_i, c_i in sl_8): (-3, 6) —
    #   total loss is 1.21–3.97, so floor < 4.
    # - Log coefficients (b_i, c_i in sl_1/7/9): loss / |log(p_min)| ~ 3/3.47 ~ 0.9,
    #   so |coeff| ≲ 3–5; use (-10, 5) or (-5, 5) with margin.
    # - Linear cross-domain weights (c_ij, W_ij, d_ij): p_j ≤ 0.75, contribution
    #   ≲ 3 → |weight| ≲ 4; use (-10, 10) with generous margin.
    # - Power-law exponents (exp_i, alpha_i, b_i in sl_8): typically 0–2 for physical
    #   decay; allow (-2, 4) for exploration.
    # - Interaction / mixing weights (w_ij in sl_2, T_ij in sl_6): code already
    #   clamps exp() to [-20,20]/[-50,50], so overflow is impossible; use (-5, 5).
    # - sl_2 A_i (scale): (0, 10) — (p+eps)^{-alpha} ~ 1–30, A * 30 ~ 3 → A ≲ 10.
    # - sl_4/5 shared alphas: (-1, 3) — fitted values 0.97–1.98.
    # - sl_8 c_i (maximum loss), a_i (decay scale), b_i (exponent): all positive,
    #   fitted a_i ~ 0.23–0.84, b_i ~ 0.23–0.34, c_i ~ 1.96–3.59.
    # - sl_10 b_i: very small (~0.01–0.06 in fit); use (-1, 1).
    # - sl_10 eps_i: small shift; use (-0.03, 0.3).
    #
    # No overflow: all expressions bounded by construction or by code-level clamps.

    # sl_1: 30p = 5 × [a_i, b_i, c_i1..c_i4]
    # loss_i = a_i + b_i*log(p_i) + sum_{j≠i} c_ij*p_j
    # Optimal: a~0.8–3.3, b~-0.02–0.003 (near zero), c~-0.5–1.6
    # b_i·log(p_i): log(p) in [-3.47, -0.29]; for |b|·3.47 ≲ 3 → |b| ≲ 1. Use (-10, 5).
    "sl_1": [(-3, 6), (-10, 5), (-10, 10), (-10, 10), (-10, 10), (-10, 10)] * 5,

    # sl_2: 35p = 5 × [A_i, eps_i, alpha_i, w_i1..w_i4]
    # loss_i = A_i*(p_i+eps_i)^{-alpha_i} * exp(sum_j w_ij*p_j)  [exp clamped ±20]
    # Optimal: A~2.2–6.2, eps~0.027–0.091, alpha~0.05–0.46, w~-1.5–0.1
    "sl_2": [(0, 10), (-0.03, 0.2), (0, 2), (-5, 5), (-5, 5), (-5, 5), (-5, 5)] * 5,

    # sl_3: 35p = 5 × [base_i, coeff_i, exp_i, W_i1..W_i4]
    # loss_i = base_i + coeff_i*p_i^exp_i + sum_{j≠i} W_ij*p_j
    # Optimal: base~0–4.4, coeff~-11–7.2, exp~1.2–2.6, W~-1.6–3.8
    # At exp_i=-2, p_min^{-2}=1024 → coeff·1024 ≲ 3 → coeff ≲ 0.003; tight with (-20,20).
    "sl_3": [(-3, 6), (-20, 20), (-2, 4), (-10, 10), (-10, 10), (-10, 10), (-10, 10)] * 5,

    # sl_4: 35p = 5 shared alphas + 5 × [bias_i, C_i1..C_i5]
    # loss_i = exp(bias_i + sum_k C_ik*p_k^alpha_k)  [lin clamped to ±50]
    # log(loss) in [0.19, 1.37]; so lin ~ 0.2–1.4; bias + sum ~ 0.2–1.4.
    # Optimal: alphas~0.97–1.93, bias~-0.85–1.45, C~-3.95–4.08
    "sl_4": [(-1, 3)] * 5 + [(-3, 3), (-10, 10), (-10, 10), (-10, 10), (-10, 10), (-10, 10)] * 5,

    # sl_5: 35p = 5 shared alphas + 5 × [b_i, W_i1..W_i5]
    # loss_i = b_i + sum_j W_ij*p_j^alpha_j
    # Optimal: alphas~1.04–1.98, b~1.1–3.5, W~-15.5–7.5
    # At alpha=2, p_min^2=0.001 → W·0.001 ≲ 3 → W ≲ 3000 (but optimal max |W|~15.5).
    # Use (-25, 25) to contain observed -15.5 with margin.
    "sl_5": [(-1, 3)] * 5 + [(-3, 6), (-25, 25), (-25, 25), (-25, 25), (-25, 25), (-25, 25)] * 5,

    # sl_6: 35p = 5 × [C_i, A_i, alpha_i, T_i1..T_i4]
    # loss_i = C_i + A_i*(p_i + sum_{j≠i} T_ij*p_j)^{-alpha_i}  [eff clamped ≥ EPS]
    # Optimal: C~1.6–3.4, A~0–0.044 (near zero), alpha~0.22–1.83, T~-3.1–4.5
    # A_i very small in fit (sl_6 mostly reduces to constant C); allow (0, 10).
    "sl_6": [(-3, 6), (0, 10), (0, 3), (-5, 5), (-5, 5), (-5, 5), (-5, 5)] * 5,

    # sl_7: 40p = 5 × [a_i, b_i, c_i, d_i1..d_i4, e_i]
    # loss_i = a_i + b_i*p_i + c_i*log(p_i) + sum_{j≠i}(d_ij*p_j + e_i*log(p_j))
    # Optimal: a~-2.8–2.8, b~-0.9–6.7, c~-0.02–0.01, d~-3.6–6.6, e~-0.007–0.008
    # c_i and e_i are near-zero (log terms contribute little); use (-5, 5).
    "sl_7": [(-5, 8), (-10, 15), (-5, 5), (-10, 10), (-10, 10), (-10, 10), (-10, 10), (-5, 5)] * 5,

    # sl_8: 15p = 5 × [c_i, a_i, b_i]
    # loss_i = c_i - a_i*p_i^{b_i}  (physical: a_i>0, b_i>0, c_i = max loss at p_i→0)
    # Optimal: c~1.96–3.59, a~0.23–0.84, b~0.23–0.34
    # At p_i=0.03125, b_i=1: a·0.03125 ≲ 2 → a ≲ 64; use (0, 5) as tight bound.
    "sl_8": [(0, 6), (0, 5), (0, 3)] * 5,

    # sl_9: 15p = 5 × [a_i, b_i, c_i]
    # loss_i = a_i + b_i*log(p_i) + c_i*[log(p_i)]^2
    # Optimal: a~1.17–3.25, b~-0.15 to -0.04, c~-0.004 to -0.001 (near zero)
    # [log(p)]^2 in [0.08, 12]; c·12 ≲ 0.05 → negligible; use (-1, 1) for c_i.
    "sl_9": [(-3, 6), (-2, 1), (-1, 1)] * 5,

    # sl_10: 15p = 5 × [a_i, b_i, eps_i]
    # loss_i = a_i + b_i / (p_i + eps_i)  [denom clamped ≥ EPS]
    # Optimal: a~1.28–3.27, b~0.009–0.057, eps~0.014–0.100
    # b_i very small: b/(p+eps) ~ 0.05/(0.25+0.05) ~ 0.17; use (-1, 1).
    "sl_10": [(-3, 6), (-1, 1), (-0.03, 0.3)] * 5,
}

LAW_REGISTRY = {
    "sl_1": sl_1, "sl_2": sl_2, "sl_3": sl_3, "sl_4": sl_4, "sl_5": sl_5,
    "sl_6": sl_6, "sl_7": sl_7, "sl_8": sl_8, "sl_9": sl_9, "sl_10": sl_10,
}
PARAM_COUNTS = {
    "sl_1": 30, "sl_2": 35, "sl_3": 35, "sl_4": 35, "sl_5": 35,
    "sl_6": 35, "sl_7": 40, "sl_8": 15, "sl_9": 15, "sl_10": 15,
}
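As a usage sketch of the shared multi-output calling convention (the import path below is hypothetical; the module actually lives under `benchmark.dataset` in this repo, so adjust to wherever the package is importable):

```python
import numpy as np
from domain_mixture_scaling_law.laws import LAW_REGISTRY, PARAM_BOUNDS  # hypothetical path

bounds = PARAM_BOUNDS["sl_9"]
theta0 = np.array([(lo + hi) / 2.0 for lo, hi in bounds])  # midpoint init, (15,)
X = np.full((4, 5), 0.2)  # 4 mixtures over 5 domains, each proportion 0.2

pred, jac = LAW_REGISTRY["sl_9"](theta0, X, backend="numpy")
print(pred.shape, jac.shape)  # (4, 5) per-domain losses, (4, 5, 15) Jacobian
```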
domain_mixture_scaling_law/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4b6f9e71e38cdd329881f75592d8b847f062c1360daac2e5fbb20f99aee63b26
size 6363
domain_mixture_scaling_law/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b2ca2e6ed1f78d0a462b425394fb748b4b489b079b1aa320753149e5368e2015
size 8979
farseer_scaling_law/__init__.py
ADDED
@@ -0,0 +1 @@
"""Farseer scaling law benchmark dataset."""
farseer_scaling_law/laws.py
ADDED
@@ -0,0 +1,99 @@
"""Scaling laws for the Farseer N-D benchmark split.

X columns: [N (Model Size), D (Training Tokens)]

Primary law:
    L(N, D) = exp(s * N^q + S) + exp(B * N^b + Q) * D^(-exp(A * N^a + E))
"""

from typing import Literal

import benchmark.dataset.utils as utils

_EPS = 1e-12
_EXP_CLIP = 50.0


# Farseer law (9 params):
#   exp(s * N^q + S) + exp(B * N^b + Q) * D^(-exp(A * N^a + E))
# theta: [E, s, q, S, B, b, Q, A, a]
def sl_1(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    N = ops.clamp_min(X[:, 0], _EPS)
    D = ops.clamp_min(X[:, 1], _EPS)

    E = theta[:, 0]
    s = theta[:, 1]
    q = theta[:, 2]
    S = theta[:, 3]
    B = theta[:, 4]
    b = theta[:, 5]
    Q = theta[:, 6]
    A = theta[:, 7]
    a = theta[:, 8]

    log_N = xp.log(ops.clamp_min(N, _EPS))
    log_D = xp.log(ops.clamp_min(D, _EPS))

    N_pow_q = N[None, :] ** q[:, None]
    N_pow_b = N[None, :] ** b[:, None]
    N_pow_a = N[None, :] ** a[:, None]

    term1_arg = ops.clamp(s[:, None] * N_pow_q + S[:, None], min=-_EXP_CLIP, max=_EXP_CLIP)
    bn_arg = ops.clamp(B[:, None] * N_pow_b + Q[:, None], min=-_EXP_CLIP, max=_EXP_CLIP)
    exp_an_arg = ops.clamp(A[:, None] * N_pow_a + E[:, None], min=-_EXP_CLIP, max=_EXP_CLIP)

    term1 = ops.exp(term1_arg)
    exp_an = ops.exp(exp_an_arg)
    an = -exp_an
    log_term2 = ops.clamp(bn_arg + an * log_D[None, :], min=-_EXP_CLIP, max=_EXP_CLIP)
    term2 = ops.exp(log_term2)
    pred = term1 + term2

    d_E = term2 * log_D[None, :] * an
    d_s = term1 * N_pow_q
    d_q = term1 * s[:, None] * N_pow_q * log_N[None, :]
    d_S = term1
    d_B = term2 * N_pow_b
    d_b = term2 * B[:, None] * N_pow_b * log_N[None, :]
    d_Q = term2
    d_A = term2 * log_D[None, :] * an * N_pow_a
    d_a = term2 * log_D[None, :] * an * A[:, None] * N_pow_a * log_N[None, :]

    jac = ops.stack([d_E, d_s, d_q, d_S, d_B, d_b, d_Q, d_A, d_a], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


LAW_REGISTRY = {"sl_1": sl_1}
PARAM_COUNTS = {"sl_1": 9}

# Data ranges based on the integrated benchmark split:
#   N ∈ [9.96e7, 2.51e10], D ∈ [1.0e9, 5.12e11], loss ∈ [0.438, 0.675]
PARAM_BOUNDS = {
    # Bounds centered around the paper / notebook ground-truth parameters,
    # but widened substantially to reduce prior pressure while preserving
    # the sign of the exponent terms: q > 0, b < 0, a > 0.
    # E=3.133347198805445, s=-0.062465473, q=0.13, S=0.1284880679442551,
    # B=230.73437075885855, b=-0.1729, Q=-1.544209554,
    # A=-1.665630816, a=0.0458999999999619
    "sl_1": [
        (1.0, 6.0),      # E
        (-1.0, 0.3),     # s
        (0.01, 0.5),     # q > 0
        (-1.0, 1.0),     # S
        (10.0, 1000.0),  # B
        (-0.6, -0.01),   # b < 0
        (-5.0, 1.0),     # Q
        (-5.0, 1.0),     # A
        (0.001, 0.25),   # a > 0
    ],
}
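A standalone evaluation of the closed form at the ground-truth parameters quoted in the bounds comment (pure NumPy, independent of the backend utilities) lands inside the documented loss range:

```python
import numpy as np

E, s, q, S = 3.133347198805445, -0.062465473, 0.13, 0.1284880679442551
B, b, Q = 230.73437075885855, -0.1729, -1.544209554
A, a = -1.665630816, 0.0458999999999619

def farseer_loss(N, D):
    term1 = np.exp(s * N ** q + S)
    data_exp = np.exp(A * N ** a + E)  # exponent on D, always > 0
    return term1 + np.exp(B * N ** b + Q) * D ** (-data_exp)

print(farseer_loss(1e9, 1e11))  # ≈ 0.51, inside the loss range [0.438, 0.675]
```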
farseer_scaling_law/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f886cce70555fed73c6ecbdc6588edc0cb0947a00db36e15c8795afcba280f3d
size 2841
farseer_scaling_law/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:98c4167b4b111814bcf8450bef0ec975239da067dd4f42ef6b71f4c1667705bc
size 7363
lr_bsz_scaling_law/laws.py
ADDED
@@ -0,0 +1,1188 @@
| 1 |
+
"""Scaling laws for learning-rate / batch-size / data-size / model-size."""
|
| 2 |
+
|
| 3 |
+
from typing import Literal
|
| 4 |
+
|
| 5 |
+
import benchmark.dataset.utils as utils
|
| 6 |
+
|
| 7 |
+
_EPS = 1e-30
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# Scaling law 1 (15 params):
|
| 11 |
+
# Degree-2 polynomial in log-space of 4 features, predicting log(lm_loss).
|
| 12 |
+
# Output = exp(poly).
|
| 13 |
+
#
|
| 14 |
+
# Features after log: z = [log(lr), log(bsz), log(data_size), log(non_embedding_param_size)]
|
| 15 |
+
# Polynomial terms (15 total):
|
| 16 |
+
# bias, z0, z1, z2, z3, z0^2, z1^2, z2^2, z3^2, z0*z1, z0*z2, z0*z3, z1*z2, z1*z3, z2*z3
|
| 17 |
+
#
|
| 18 |
+
# theta: (B, 15)
|
| 19 |
+
# X: [lr, bsz, data_size, non_embedding_param_size]
|
| 20 |
+
def sl_1(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
|
| 21 |
+
ops = utils.get_ops(backend)
|
| 22 |
+
X = ops.asarray(X, atleast_2d=True)
|
| 23 |
+
theta = ops.asarray(theta, atleast_2d=True)
|
| 24 |
+
|
| 25 |
+
xp = ops.xp
|
| 26 |
+
|
| 27 |
+
x0 = ops.clamp_min(X[:, 0], _EPS)
|
| 28 |
+
x1 = ops.clamp_min(X[:, 1], _EPS)
|
| 29 |
+
x2 = ops.clamp_min(X[:, 2], _EPS)
|
| 30 |
+
x3 = ops.clamp_min(X[:, 3], _EPS)
|
| 31 |
+
|
| 32 |
+
z0 = xp.log(x0)
|
| 33 |
+
z1 = xp.log(x1)
|
| 34 |
+
z2 = xp.log(x2)
|
| 35 |
+
z3 = xp.log(x3)
|
| 36 |
+
|
| 37 |
+
# Build feature matrix: (M, 15)
|
| 38 |
+
ones = z0 * 0.0 + 1.0
|
| 39 |
+
if backend == "torch":
|
| 40 |
+
features = xp.stack([
|
| 41 |
+
ones, z0, z1, z2, z3,
|
| 42 |
+
z0 * z0, z1 * z1, z2 * z2, z3 * z3,
|
| 43 |
+
z0 * z1, z0 * z2, z0 * z3, z1 * z2, z1 * z3, z2 * z3,
|
| 44 |
+
], dim=-1) # (M, 15)
|
| 45 |
+
else:
|
| 46 |
+
features = xp.stack([
|
| 47 |
+
ones, z0, z1, z2, z3,
|
| 48 |
+
z0 * z0, z1 * z1, z2 * z2, z3 * z3,
|
| 49 |
+
z0 * z1, z0 * z2, z0 * z3, z1 * z2, z1 * z3, z2 * z3,
|
| 50 |
+
], axis=-1) # (M, 15)
|
| 51 |
+
|
| 52 |
+
# theta: (B, 15), features: (M, 15) -> log_pred: (B, M)
|
| 53 |
+
if backend == "torch":
|
| 54 |
+
log_pred = xp.matmul(theta, features.T)
|
| 55 |
+
else:
|
| 56 |
+
log_pred = theta @ features.T
|
| 57 |
+
|
| 58 |
+
pred = ops.exp(log_pred)
|
| 59 |
+
|
| 60 |
+
# Jacobian: d pred / d theta_i = pred * features[:, i]
|
| 61 |
+
# pred: (B, M), features: (M, 15) -> jac: (B, M, 15)
|
| 62 |
+
jac = pred[:, :, None] * features[None, :, :]
|
| 63 |
+
|
| 64 |
+
if pred.shape[0] == 1:
|
| 65 |
+
return pred[0], jac[0]
|
| 66 |
+
return pred, jac
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+


# sl_2 (26p): Physics-inspired softplus-penalty model
# base = L_inf + Cp*exp(-ap*p) + Cd*exp(-ad*s) + Cdp*exp(-adp*(s-k*p)) + Cbb*exp(-abb*v)
# u_star = u0 + up*p + us*s + uv*v; v_star = v0 + vp*p + vs*s
# cL = softplus(cL0+cLp*p+cLs*s+cLv*v); cB = softplus(cB0+cBp*p+cBs*s+cBv*v)
# penalty = cL*du^2 + cB*dv^2 + 2*rho*sqrt(cL*cB)*du*dv
# loss = base + penalty
# theta: [L_inf, Cp, ap, Cd, ad, Cdp, adp, k, u0, up, us, uv, v0, vp, vs,
#         cL0, cLp, cLs, cB0, cBp, cBs, rho, cLv, cBv, Cbb, abb]
def sl_2(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    lr = ops.clamp_min(X[:, 0], _EPS)
    bsz = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    P = ops.clamp_min(X[:, 3], _EPS)
    u = xp.log(lr); v = xp.log(bsz); s = xp.log(D); p = xp.log(P)
    t = [theta[:, i] for i in range(26)]
    L_inf, Cp, ap, Cd, ad, Cdp, adp, k = t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7]
    u0, up_, us, uv, v0, vp_, vs = t[8], t[9], t[10], t[11], t[12], t[13], t[14]
    cL0, cLp, cLs, cB0, cBp, cBs, rho = t[15], t[16], t[17], t[18], t[19], t[20], t[21]
    cLv, cBv, Cbb, abb = t[22], t[23], t[24], t[25]

    def softplus(x):
        return xp.log(1.0 + ops.exp(ops.clamp(x, min=-20.0, max=20.0)))

    def sigmoid(x):
        ex = ops.exp(ops.clamp(x, min=-20.0, max=20.0))
        return ex / (1.0 + ex)

    # Base loss (power-law terms in exp form)
    Cp_s = softplus(Cp); ap_s = softplus(ap)
    Cd_s = softplus(Cd); ad_s = softplus(ad)
    Cdp_s = softplus(Cdp); adp_s = softplus(adp)
    Cbb_s = softplus(Cbb); abb_s = softplus(abb)

    # Sigmoid for softplus derivatives
    sig_Cp = sigmoid(Cp); sig_ap = sigmoid(ap)
    sig_Cd = sigmoid(Cd); sig_ad = sigmoid(ad)
    sig_Cdp = sigmoid(Cdp); sig_adp = sigmoid(adp)
    sig_Cbb = sigmoid(Cbb); sig_abb = sigmoid(abb)

    exp_P = ops.exp(-ap_s[:, None] * p[None, :])  # (B, M)
    exp_D = ops.exp(-ad_s[:, None] * s[None, :])  # (B, M)
    exp_DP = ops.exp(-adp_s[:, None] * (s[None, :] - k[:, None] * p[None, :]))  # (B, M)
    exp_V = ops.exp(-abb_s[:, None] * v[None, :])  # (B, M)

    base = (L_inf[:, None]
            + Cp_s[:, None] * exp_P
            + Cd_s[:, None] * exp_D
            + Cdp_s[:, None] * exp_DP
            + Cbb_s[:, None] * exp_V)

    # Optimal lr and bsz
    u_star = u0[:, None] + up_[:, None] * p[None, :] + us[:, None] * s[None, :] + uv[:, None] * v[None, :]
    v_star = v0[:, None] + vp_[:, None] * p[None, :] + vs[:, None] * s[None, :]
    du = u[None, :] - u_star
    dv = v[None, :] - v_star

    # State-dependent curvatures
    zL = cL0[:, None] + cLp[:, None] * p[None, :] + cLs[:, None] * s[None, :] + cLv[:, None] * v[None, :]
    zB = cB0[:, None] + cBp[:, None] * p[None, :] + cBs[:, None] * s[None, :] + cBv[:, None] * v[None, :]
    cL = softplus(zL)
    cB = softplus(zB)
    sig_zL = sigmoid(zL)  # (B, M)
    sig_zB = sigmoid(zB)  # (B, M)

    # Correlated penalty
    rho_t = xp.tanh(rho[:, None]) if hasattr(xp, 'tanh') else (ops.exp(2.0 * rho[:, None]) - 1.0) / (ops.exp(2.0 * rho[:, None]) + 1.0)
    g = (cL * cB) ** 0.5
    penalty = cL * du ** 2 + cB * dv ** 2 + 2.0 * rho_t * g * du * dv

    pred = base + penalty

    # ---- Jacobian computation ----
    # B_dim = theta.shape[0], M = X.shape[0], P = 26
    ones_BM = pred * 0.0 + 1.0  # (B, M)
    zeros_BM = pred * 0.0  # (B, M)

    # Helper: g = sqrt(cL*cB), dg/dcL = 0.5*cB/g, dg/dcB = 0.5*cL/g
    g_safe = ops.clamp_min(g, _EPS)
    dg_dcL = 0.5 * cB / g_safe  # (B, M)
    dg_dcB = 0.5 * cL / g_safe  # (B, M)

    # d(penalty)/d(cL) = du^2 + 2*rho_t*dg_dcL*du*dv
    dpen_dcL = du ** 2 + 2.0 * rho_t * dg_dcL * du * dv
    # d(penalty)/d(cB) = dv^2 + 2*rho_t*dg_dcB*du*dv
    dpen_dcB = dv ** 2 + 2.0 * rho_t * dg_dcB * du * dv

    # d(penalty)/d(du) = 2*cL*du + 2*rho_t*g*dv
    dpen_ddu = 2.0 * cL * du + 2.0 * rho_t * g * dv
    # d(penalty)/d(dv) = 2*cB*dv + 2*rho_t*g*du
    dpen_ddv = 2.0 * cB * dv + 2.0 * rho_t * g * du

    # du = u - u_star, dv = v - v_star
    # d(du)/d(u0) = -1, d(du)/d(up) = -p, d(du)/d(us) = -s, d(du)/d(uv) = -v
    # d(dv)/d(v0) = -1, d(dv)/d(vp) = -p, d(dv)/d(vs) = -s

    # d(penalty)/d(rho) = (1 - tanh(rho)^2) * 2*g*du*dv
    drho_t = 1.0 - rho_t ** 2  # (B, M) or (B, 1) -> broadcast
    dpen_drho = drho_t * 2.0 * g * du * dv  # (B, M)

    # Now compute each partial:
    # t[0] = L_inf: d/dL_inf = 1
    d_0 = ones_BM
    # t[1] = Cp: d/dCp = sig(Cp) * exp_P
    d_1 = sig_Cp[:, None] * exp_P
    # t[2] = ap: d/dap = sig(ap) * Cp_s * (-p) * exp_P
    d_2 = sig_ap[:, None] * Cp_s[:, None] * (-p[None, :]) * exp_P
    # t[3] = Cd: d/dCd = sig(Cd) * exp_D
    d_3 = sig_Cd[:, None] * exp_D
    # t[4] = ad: d/dad = sig(ad) * Cd_s * (-s) * exp_D
    d_4 = sig_ad[:, None] * Cd_s[:, None] * (-s[None, :]) * exp_D
    # t[5] = Cdp: d/dCdp = sig(Cdp) * exp_DP
    d_5 = sig_Cdp[:, None] * exp_DP
    # t[6] = adp: d/dadp = sig(adp) * Cdp_s * (-(s-k*p)) * exp_DP
    d_6 = sig_adp[:, None] * Cdp_s[:, None] * (-(s[None, :] - k[:, None] * p[None, :])) * exp_DP
    # t[7] = k: d/dk = Cdp_s * adp_s * p * exp_DP
    d_7 = Cdp_s[:, None] * adp_s[:, None] * p[None, :] * exp_DP
    # t[8] = u0: d/du0 = dpen_ddu * (-1)
    d_8 = dpen_ddu * (-1.0)
    # t[9] = up: d/dup = dpen_ddu * (-p)
    d_9 = dpen_ddu * (-p[None, :])
    # t[10] = us: d/dus = dpen_ddu * (-s)
    d_10 = dpen_ddu * (-s[None, :])
    # t[11] = uv: d/duv = dpen_ddu * (-v)
    d_11 = dpen_ddu * (-v[None, :])
    # t[12] = v0: d/dv0 = dpen_ddv * (-1)
    d_12 = dpen_ddv * (-1.0)
    # t[13] = vp: d/dvp = dpen_ddv * (-p)
    d_13 = dpen_ddv * (-p[None, :])
    # t[14] = vs: d/dvs = dpen_ddv * (-s)
    d_14 = dpen_ddv * (-s[None, :])
    # t[15] = cL0: d/dcL0 = dpen_dcL * sig_zL * 1
    d_15 = dpen_dcL * sig_zL
    # t[16] = cLp: d/dcLp = dpen_dcL * sig_zL * p
    d_16 = dpen_dcL * sig_zL * p[None, :]
    # t[17] = cLs: d/dcLs = dpen_dcL * sig_zL * s
    d_17 = dpen_dcL * sig_zL * s[None, :]
    # t[18] = cB0: d/dcB0 = dpen_dcB * sig_zB * 1
    d_18 = dpen_dcB * sig_zB
    # t[19] = cBp: d/dcBp = dpen_dcB * sig_zB * p
    d_19 = dpen_dcB * sig_zB * p[None, :]
    # t[20] = cBs: d/dcBs = dpen_dcB * sig_zB * s
    d_20 = dpen_dcB * sig_zB * s[None, :]
    # t[21] = rho: d/drho = dpen_drho
    d_21 = dpen_drho
    # t[22] = cLv: d/dcLv = dpen_dcL * sig_zL * v
    d_22 = dpen_dcL * sig_zL * v[None, :]
    # t[23] = cBv: d/dcBv = dpen_dcB * sig_zB * v
    d_23 = dpen_dcB * sig_zB * v[None, :]
    # t[24] = Cbb: d/dCbb = sig(Cbb) * exp_V
    d_24 = sig_Cbb[:, None] * exp_V
    # t[25] = abb: d/dabb = sig(abb) * Cbb_s * (-v) * exp_V
    d_25 = sig_abb[:, None] * Cbb_s[:, None] * (-v[None, :]) * exp_V

    jac = ops.stack([d_0, d_1, d_2, d_3, d_4, d_5, d_6, d_7,
                     d_8, d_9, d_10, d_11, d_12, d_13, d_14,
                     d_15, d_16, d_17, d_18, d_19, d_20, d_21,
                     d_22, d_23, d_24, d_25], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac
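

# Because the sl_2 penalty is a positive-semidefinite quadratic form in
# (du, dv) (cL, cB >= 0 via softplus and |tanh(rho)| < 1), the implied
# optimum sits at du = dv = 0, i.e. lr_opt = exp(u_star) and
# bsz_opt = exp(v_star). A small sketch recovering those optima from a fitted
# 26-long theta vector (layout as documented above; inputs are hypothetical):
def _example_sl_2_optimum(theta_fit, D, P, bsz):
    import numpy as np
    u0, up_, us, uv = theta_fit[8], theta_fit[9], theta_fit[10], theta_fit[11]
    v0, vp_, vs = theta_fit[12], theta_fit[13], theta_fit[14]
    p, s, v = np.log(P), np.log(D), np.log(bsz)
    v_star = v0 + vp_ * p + vs * s           # optimal log(bsz)
    u_star = u0 + up_ * p + us * s + uv * v  # optimal log(lr), given bsz
    return np.exp(u_star), np.exp(v_star)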


# sl_3 (24p): Chinchilla power-law + decoupled LR/BSZ quadratic valleys
# L = E + A*N^(-alpha) + B*D^(-beta) + F/(N^wN * D^wD)
#     + C_eff*(log(lr)-opt_lr)^2 + G_eff*(log(bsz)-opt_bsz)^2
# theta: [E, A, alpha, B, beta, F, wN, wD, C0, CN, CD, CB, mu0, muN, muD, muB, muND,
#         G0, GN, GD, nu0, nuN, nuD, nuND]
def sl_3(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    lr = ops.clamp_min(X[:, 0], _EPS)
    bsz = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    P = ops.clamp_min(X[:, 3], _EPS)
    lnlr = xp.log(lr); lnb = xp.log(bsz); lnD = xp.log(D); lnP = xp.log(P)
    t = [theta[:, i] for i in range(24)]
    E, A, alpha, B, beta, F, wN, wD = t[:8]
    C0, CN, CD, CB, mu0, muN, muD, muB, muND = t[8:17]
    G0, GN, GD, nu0, nuN, nuD, nuND = t[17:24]

    # Intermediate computations
    P_neg_alpha = P[None, :] ** (-alpha[:, None])  # (B, M)
    D_neg_beta = D[None, :] ** (-beta[:, None])  # (B, M)
    PwN = P[None, :] ** wN[:, None]  # (B, M)
    DwD = D[None, :] ** wD[:, None]  # (B, M)
    denom = ops.clamp_min(PwN * DwD, _EPS)
    joint_term = F[:, None] / denom  # (B, M)

    base = (E[:, None]
            + A[:, None] * P_neg_alpha
            + B[:, None] * D_neg_beta
            + joint_term)

    opt_lr = mu0[:, None] + muN[:, None]*lnP[None, :] + muD[:, None]*lnD[None, :] + muB[:, None]*lnb[None, :] + muND[:, None]*lnP[None, :]*lnD[None, :]
    lr_exp = CN[:, None]*lnP[None, :] + CD[:, None]*lnD[None, :] + CB[:, None]*lnb[None, :]
    C_eff = C0[:, None] * ops.exp(lr_exp)
    dlr = lnlr[None, :] - opt_lr
    lr_pen = C_eff * dlr ** 2

    opt_bsz = nu0[:, None] + nuN[:, None]*lnP[None, :] + nuD[:, None]*lnD[None, :] + nuND[:, None]*lnP[None, :]*lnD[None, :]
    bsz_exp = GN[:, None]*lnP[None, :] + GD[:, None]*lnD[None, :]
    G_eff = G0[:, None] * ops.exp(bsz_exp)
    dbsz = lnb[None, :] - opt_bsz
    bsz_pen = G_eff * dbsz ** 2

    pred = base + lr_pen + bsz_pen

    # ---- Jacobian ----
    ones_BM = pred * 0.0 + 1.0

    # d/dE = 1
    d_E = ones_BM
    # d/dA = P^(-alpha)
    d_A = P_neg_alpha
    # d/dalpha = A * P^(-alpha) * (-lnP) = -A * P_neg_alpha * lnP
    d_alpha = -A[:, None] * P_neg_alpha * lnP[None, :]
    # d/dB = D^(-beta)
    d_B = D_neg_beta
    # d/dbeta = -B * D^(-beta) * lnD
    d_beta = -B[:, None] * D_neg_beta * lnD[None, :]
    # d/dF = 1/denom
    d_F = 1.0 / denom
    # d/dwN = F * d/dwN(1/(P^wN * D^wD)) = F * (-lnP) / denom = -joint_term * lnP
    d_wN = -joint_term * lnP[None, :]
    # d/dwD = -joint_term * lnD
    d_wD = -joint_term * lnD[None, :]

    # LR penalty partials
    # C_eff = C0 * exp(lr_exp), dlr = lnlr - opt_lr
    # lr_pen = C_eff * dlr^2

    # d/dC0 = exp(lr_exp) * dlr^2
    d_C0 = ops.exp(lr_exp) * dlr ** 2
    # d/dCN = C_eff * lnP * dlr^2 (chain through lr_exp)
    d_CN = C_eff * lnP[None, :] * dlr ** 2
    # d/dCD = C_eff * lnD * dlr^2
    d_CD = C_eff * lnD[None, :] * dlr ** 2
    # d/dCB = C_eff * lnb * dlr^2
    d_CB = C_eff * lnb[None, :] * dlr ** 2
    # d/dmu0 = C_eff * 2*dlr * (-1) = -2*C_eff*dlr
    d_mu0 = -2.0 * C_eff * dlr
    # d/dmuN = -2*C_eff*dlr * lnP
    d_muN = -2.0 * C_eff * dlr * lnP[None, :]
    # d/dmuD = -2*C_eff*dlr * lnD
    d_muD = -2.0 * C_eff * dlr * lnD[None, :]
    # d/dmuB = -2*C_eff*dlr * lnb
    d_muB = -2.0 * C_eff * dlr * lnb[None, :]
    # d/dmuND = -2*C_eff*dlr * lnP*lnD
    d_muND = -2.0 * C_eff * dlr * lnP[None, :] * lnD[None, :]

    # BSZ penalty partials
    # G_eff = G0 * exp(bsz_exp), dbsz = lnb - opt_bsz
    # bsz_pen = G_eff * dbsz^2

    # d/dG0 = exp(bsz_exp) * dbsz^2
    d_G0 = ops.exp(bsz_exp) * dbsz ** 2
    # d/dGN = G_eff * lnP * dbsz^2
    d_GN = G_eff * lnP[None, :] * dbsz ** 2
    # d/dGD = G_eff * lnD * dbsz^2
    d_GD = G_eff * lnD[None, :] * dbsz ** 2
    # d/dnu0 = -2*G_eff*dbsz
    d_nu0 = -2.0 * G_eff * dbsz
    # d/dnuN = -2*G_eff*dbsz * lnP
    d_nuN = -2.0 * G_eff * dbsz * lnP[None, :]
    # d/dnuD = -2*G_eff*dbsz * lnD
    d_nuD = -2.0 * G_eff * dbsz * lnD[None, :]
    # d/dnuND = -2*G_eff*dbsz * lnP*lnD
    d_nuND = -2.0 * G_eff * dbsz * lnP[None, :] * lnD[None, :]

    # Order: [E, A, alpha, B, beta, F, wN, wD,
    #         C0, CN, CD, CB, mu0, muN, muD, muB, muND,
    #         G0, GN, GD, nu0, nuN, nuD, nuND]
    jac = ops.stack([d_E, d_A, d_alpha, d_B, d_beta, d_F, d_wN, d_wD,
                     d_C0, d_CN, d_CD, d_CB, d_mu0, d_muN, d_muD, d_muB, d_muND,
                     d_G0, d_GN, d_GD, d_nu0, d_nuN, d_nuD, d_nuND], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac
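

# Since a jax backend is supported, the hand-derived partials above can also
# be cross-checked with autodiff. A sketch, assuming the jax ops in utils are
# differentiable jax.numpy wrappers (the theta and X values are invented):
def _example_autodiff_check_sl_3():
    import jax
    import jax.numpy as jnp
    X = jnp.array([[3e-4, 256.0, 1e10, 3e8]])
    theta = jnp.full((24,), 0.1)
    _, jac = sl_3(theta, X, backend="jax")
    auto_jac = jax.jacfwd(lambda th: sl_3(th, X, backend="jax")[0])(theta)
    assert jnp.allclose(jac, auto_jac, rtol=1e-4)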


# sl_4 (20p): Log-polynomial-2 + inverse features
# log(loss) = w . phi(X), loss = exp(...)
# phi = [1, log(lr), log(bsz), log(D), log(P),
#        log(lr)^2, log(bsz)^2, log(D)^2, log(P)^2,
#        log(lr)*log(bsz), log(lr)*log(D), log(lr)*log(P),
#        log(bsz)*log(D), log(bsz)*log(P), log(D)*log(P),
#        log(D)-log(P), 1/bsz, 1/bsz^2, 1/D, 1/P]
# theta: 20 weights
def sl_4(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    lr = ops.clamp_min(X[:, 0], _EPS)
    bsz = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    P = ops.clamp_min(X[:, 3], _EPS)
    z0 = xp.log(lr); z1 = xp.log(bsz); z2 = xp.log(D); z3 = xp.log(P)
    ones = z0 * 0.0 + 1.0
    feat_list = [
        ones, z0, z1, z2, z3,
        z0*z0, z1*z1, z2*z2, z3*z3,
        z0*z1, z0*z2, z0*z3, z1*z2, z1*z3, z2*z3,
        z2 - z3,
        1.0 / bsz, 1.0 / (bsz * bsz), 1.0 / D, 1.0 / P,
    ]
    if backend == "torch":
        features = xp.stack(feat_list, dim=-1)
        log_pred = xp.matmul(theta, features.T)
    else:
        features = xp.stack(feat_list, axis=-1)
        log_pred = theta @ features.T
    pred = ops.exp(log_pred)

    # Jacobian: same as sl_1, d pred / d theta_i = pred * features[:, i]
    jac = pred[:, :, None] * features[None, :, :]

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac
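

# Note on sl_4's feature set: z2 - z3 = log(D/P) (the log tokens-per-parameter
# ratio) is a linear combination of the z2 and z3 columns already present, so
# those columns are linearly dependent and the coefficient on z2 - z3 is only
# pinned down by the bounds of the fit. A quick numerical check of that rank
# deficiency (numpy; the sample points are invented):
def _example_sl_4_rank():
    import numpy as np
    z = np.log(np.random.default_rng(0).uniform(0.5, 2.0, size=(50, 4)))
    z0, z1, z2, z3 = z.T
    cols = [np.ones(50), z0, z1, z2, z3,
            z0*z0, z1*z1, z2*z2, z3*z3,
            z0*z1, z0*z2, z0*z3, z1*z2, z1*z3, z2*z3,
            z2 - z3]
    F = np.stack(cols, axis=-1)  # 16 columns
    assert np.linalg.matrix_rank(F) == 15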


# sl_5 (19p): Chinchilla + exp-decay + LR quadratic penalty
# u=log(lr), v=log(bsz), s=log(D), n=log(P)
# u_star = u0+kb*v+kn*n+kd*s; lr_amp = clr0*exp(-wb*v-wn*n-ws*s)
# loss = L0 + AN*exp(-aN*n) + AD*exp(-aD*s) + AB*exp(-aB*v)
#        + AR*exp(-aR*(s-n)^2) + AX*exp(-aX*(s-v)) + lr_amp*(u-u_star)^2
# theta: [L0, AN, aN, AD, aD, AB, aB, clr0, u0, kb, kn, kd, wb, wn, ws, AR, aR, AX, aX]
def sl_5(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    lr = ops.clamp_min(X[:, 0], _EPS)
    bsz = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    P = ops.clamp_min(X[:, 3], _EPS)
    u = xp.log(lr); v = xp.log(bsz); s = xp.log(D); n = xp.log(P)
    t = [theta[:, i] for i in range(19)]
    L0, AN, aN, AD, aD, AB, aB = t[:7]
    clr0, u0, kb, kn, kd, wb, wn, ws = t[7:15]
    AR, aR, AX, aX = t[15:19]

    exp_N = ops.exp(-aN[:, None] * n[None, :])  # (B, M)
    exp_D = ops.exp(-aD[:, None] * s[None, :])  # (B, M)
    exp_V = ops.exp(-aB[:, None] * v[None, :])  # (B, M)

    base = (L0[:, None]
            + AN[:, None] * exp_N
            + AD[:, None] * exp_D
            + AB[:, None] * exp_V)

    sn_diff = s[None, :] - n[None, :]
    sn_diff_sq = sn_diff ** 2
    exp_R = ops.exp(-aR[:, None] * sn_diff_sq)  # (B, M)
    ratio_term = AR[:, None] * exp_R

    sv_diff = s[None, :] - v[None, :]
    exp_X = ops.exp(-aX[:, None] * sv_diff)  # (B, M)
    cross_term = AX[:, None] * exp_X

    u_star = u0[:, None] + kb[:, None]*v[None, :] + kn[:, None]*n[None, :] + kd[:, None]*s[None, :]
    lr_exp_arg = -wb[:, None]*v[None, :] - wn[:, None]*n[None, :] - ws[:, None]*s[None, :]
    lr_amp = clr0[:, None] * ops.exp(lr_exp_arg)
    du = u[None, :] - u_star
    du_sq = du ** 2
    lr_pen = lr_amp * du_sq

    pred = base + ratio_term + cross_term + lr_pen

    # ---- Jacobian ----
    ones_BM = pred * 0.0 + 1.0

    # t[0] = L0
    d_0 = ones_BM
    # t[1] = AN: d/dAN = exp_N
    d_1 = exp_N
    # t[2] = aN: d/daN = AN * (-n) * exp_N
    d_2 = AN[:, None] * (-n[None, :]) * exp_N
    # t[3] = AD: d/dAD = exp_D
    d_3 = exp_D
    # t[4] = aD: d/daD = AD * (-s) * exp_D
    d_4 = AD[:, None] * (-s[None, :]) * exp_D
    # t[5] = AB: d/dAB = exp_V
    d_5 = exp_V
    # t[6] = aB: d/daB = AB * (-v) * exp_V
    d_6 = AB[:, None] * (-v[None, :]) * exp_V

    # lr_pen = clr0 * exp(lr_exp_arg) * du^2
    # lr_amp = clr0 * exp(lr_exp_arg)
    exp_lr = ops.exp(lr_exp_arg)

    # t[7] = clr0: d/dclr0 = exp(lr_exp_arg) * du^2
    d_7 = exp_lr * du_sq

    # d(lr_pen)/d(du) = lr_amp * 2*du; d(du)/d(u0) = -1
    dlrpen_ddu = 2.0 * lr_amp * du

    # t[8] = u0: d/du0 = -dlrpen_ddu
    d_8 = -dlrpen_ddu
    # t[9] = kb: d/dkb = dlrpen_ddu * (-v)
    d_9 = dlrpen_ddu * (-v[None, :])
    # t[10] = kn: d/dkn = dlrpen_ddu * (-n)
    d_10 = dlrpen_ddu * (-n[None, :])
    # t[11] = kd: d/dkd = dlrpen_ddu * (-s)
    d_11 = dlrpen_ddu * (-s[None, :])
    # t[12] = wb: d/dwb = lr_pen * (-v) (chain through exp)
    d_12 = lr_pen * (-v[None, :])
    # t[13] = wn: d/dwn = lr_pen * (-n)
    d_13 = lr_pen * (-n[None, :])
    # t[14] = ws: d/dws = lr_pen * (-s)
    d_14 = lr_pen * (-s[None, :])
    # t[15] = AR: d/dAR = exp_R
    d_15 = exp_R
    # t[16] = aR: d/daR = AR * (-(s-n)^2) * exp_R
    d_16 = AR[:, None] * (-sn_diff_sq) * exp_R
    # t[17] = AX: d/dAX = exp_X
    d_17 = exp_X
    # t[18] = aX: d/daX = AX * (-(s-v)) * exp_X
    d_18 = AX[:, None] * (-sv_diff) * exp_X

    jac = ops.stack([d_0, d_1, d_2, d_3, d_4, d_5, d_6,
                     d_7, d_8, d_9, d_10, d_11, d_12, d_13, d_14,
                     d_15, d_16, d_17, d_18], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_6 (14p): L_inf + exp(partial poly2)
# loss = L_inf + exp(w0 + w_d*log(D) + w_p*log(P) + w_dp*log(D)*log(P)
#                    + w_lr*log(lr) + w_lr2*log(lr)^2
#                    + w_bsz*log(bsz) + w_bsz2*log(bsz)^2
#                    + w_lrbsz*log(lr)*log(bsz)
#                    + w_lrD*log(lr)*log(D) + w_lrP*log(lr)*log(P)
#                    + w_bszD*log(bsz)*log(D) + w_bszP*log(bsz)*log(P))
# theta: [L_inf, w0, w_d, w_p, w_dp, w_lr, w_lr2, w_bsz, w_bsz2,
#         w_lrbsz, w_lrD, w_lrP, w_bszD, w_bszP]
def sl_6(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    lr = ops.clamp_min(X[:, 0], _EPS)
    bsz = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    P = ops.clamp_min(X[:, 3], _EPS)
    lnlr = xp.log(lr); lnb = xp.log(bsz); lnD = xp.log(D); lnP = xp.log(P)
    t = [theta[:, i] for i in range(14)]
    L_inf = t[0]
    exponent = (t[1][:, None]
                + t[2][:, None]*lnD[None, :] + t[3][:, None]*lnP[None, :]
                + t[4][:, None]*lnD[None, :]*lnP[None, :]
                + t[5][:, None]*lnlr[None, :] + t[6][:, None]*lnlr[None, :]**2
                + t[7][:, None]*lnb[None, :] + t[8][:, None]*lnb[None, :]**2
                + t[9][:, None]*lnlr[None, :]*lnb[None, :]
                + t[10][:, None]*lnlr[None, :]*lnD[None, :] + t[11][:, None]*lnlr[None, :]*lnP[None, :]
                + t[12][:, None]*lnb[None, :]*lnD[None, :] + t[13][:, None]*lnb[None, :]*lnP[None, :])
    exponent = ops.clamp(exponent, min=-50.0, max=50.0)
    exp_term = ops.exp(exponent)
    pred = L_inf[:, None] + exp_term

    # ---- Jacobian ----
    # d/dL_inf = 1
    ones_BM = pred * 0.0 + 1.0
    d_0 = ones_BM

    # For t[1..13]: d/dt_i = exp_term * (feature_i)
    # feature for t[1] = 1 (w0)
    d_1 = exp_term
    # t[2] = w_d: feature = lnD
    d_2 = exp_term * lnD[None, :]
    # t[3] = w_p: feature = lnP
    d_3 = exp_term * lnP[None, :]
    # t[4] = w_dp: feature = lnD*lnP
    d_4 = exp_term * lnD[None, :] * lnP[None, :]
    # t[5] = w_lr: feature = lnlr
    d_5 = exp_term * lnlr[None, :]
    # t[6] = w_lr2: feature = lnlr^2
    d_6 = exp_term * lnlr[None, :] ** 2
    # t[7] = w_bsz: feature = lnb
    d_7 = exp_term * lnb[None, :]
    # t[8] = w_bsz2: feature = lnb^2
    d_8 = exp_term * lnb[None, :] ** 2
    # t[9] = w_lrbsz: feature = lnlr*lnb
    d_9 = exp_term * lnlr[None, :] * lnb[None, :]
    # t[10] = w_lrD: feature = lnlr*lnD
    d_10 = exp_term * lnlr[None, :] * lnD[None, :]
    # t[11] = w_lrP: feature = lnlr*lnP
    d_11 = exp_term * lnlr[None, :] * lnP[None, :]
    # t[12] = w_bszD: feature = lnb*lnD
    d_12 = exp_term * lnb[None, :] * lnD[None, :]
    # t[13] = w_bszP: feature = lnb*lnP
    d_13 = exp_term * lnb[None, :] * lnP[None, :]

    jac = ops.stack([d_0, d_1, d_2, d_3, d_4, d_5, d_6, d_7,
                     d_8, d_9, d_10, d_11, d_12, d_13], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac
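

# Caveat on sl_6: the exponent is clamped to [-50, 50] in the forward pass,
# but the partials above do not zero out where the clamp is active, so the
# analytic Jacobian is exact only in the unclamped region. With the
# PARAM_BOUNDS further down, the exponent should stay far from +/-50 in
# practice.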


# sl_7 (31p): E + exp(poly2_A) + exp(poly2_B) dual-term
# features = [1, x1..x4, x1^2..x4^2, x1*x2, x1*x3, x1*x4, x2*x3, x2*x4, x3*x4]
# (15 features from log inputs)
# loss = E + exp(features . w1) + exp(features . w2)
# theta: [E, w1[0..14], w2[0..14]]
def sl_7(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    lr = ops.clamp_min(X[:, 0], _EPS)
    bsz = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    P = ops.clamp_min(X[:, 3], _EPS)
    z0 = xp.log(lr); z1 = xp.log(bsz); z2 = xp.log(D); z3 = xp.log(P)
    ones = z0 * 0.0 + 1.0
    feat_list = [
        ones, z0, z1, z2, z3,
        z0*z0, z1*z1, z2*z2, z3*z3,
        z0*z1, z0*z2, z0*z3, z1*z2, z1*z3, z2*z3,
    ]
    if backend == "torch":
        features = xp.stack(feat_list, dim=-1)  # (M, 15)
    else:
        features = xp.stack(feat_list, axis=-1)  # (M, 15)
    E = theta[:, 0]
    w1 = theta[:, 1:16]   # (B, 15)
    w2 = theta[:, 16:31]  # (B, 15)
    if backend == "torch":
        log1 = xp.matmul(w1, features.T)
        log2 = xp.matmul(w2, features.T)
    else:
        log1 = w1 @ features.T
        log2 = w2 @ features.T
    log1 = ops.clamp(log1, min=-50.0, max=50.0)
    log2 = ops.clamp(log2, min=-50.0, max=50.0)
    exp1 = ops.exp(log1)
    exp2 = ops.exp(log2)
    pred = E[:, None] + exp1 + exp2

    # ---- Jacobian ----
    # d/dE = 1
    ones_BM = pred * 0.0 + 1.0

    # d/dw1_i = exp1 * features[:, i] -> (B, M)
    # d/dw2_i = exp2 * features[:, i] -> (B, M)
    # jac_w1: (B, M, 15) = exp1[:,:,None] * features[None,:,:]
    jac_w1 = exp1[:, :, None] * features[None, :, :]  # (B, M, 15)
    jac_w2 = exp2[:, :, None] * features[None, :, :]  # (B, M, 15)

    # Build full jac: [d_E, jac_w1[...,0], ..., jac_w1[...,14], jac_w2[...,0], ..., jac_w2[...,14]]
    partials = [ones_BM]
    for i in range(15):
        partials.append(jac_w1[:, :, i])
    for i in range(15):
        partials.append(jac_w2[:, :, i])

    jac = ops.stack(partials, axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_8 (20p): Chinchilla + asymmetric tanh-skewed penalties
# u=log(lr), v=log(bsz), s=log(D), p=log(P)
# term_P = cP*exp(-aP*p); term_D = cD*exp(-aD*s); term_R = cR*exp(-aR*(s-p))
# lr_opt = phi0+phi_b*v+phi_p*p+phi_d*s; dev=u-lr_opt
# lr_pen = k_lr*dev^2*(1+a_lr*tanh(dev))
# ns = u-0.5*v; ns_opt = psi0+psi_p*p+psi_d*s; dev_ns=ns-ns_opt
# ns_pen = k_ns*dev_ns^2*(1+a_ns*tanh(dev_ns))
# dp_pen = k_dp*((s-p)-delta0)^2
# loss = L0 + term_P + term_D + term_R + lr_pen + ns_pen + dp_pen
# theta: [L0, cP, aP, cD, aD, cR, aR, phi0, phi_b, phi_p, phi_d,
#         k_lr, a_lr, psi0, psi_p, psi_d, k_ns, a_ns, delta0, k_dp]
def sl_8(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    lr = ops.clamp_min(X[:, 0], _EPS)
    bsz = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    P = ops.clamp_min(X[:, 3], _EPS)
    u = xp.log(lr); v = xp.log(bsz); s = xp.log(D); p = xp.log(P)
    t = [theta[:, i] for i in range(20)]

    def softplus(x):
        return xp.log(1.0 + ops.exp(ops.clamp(x, min=-20.0, max=20.0)))

    def sigmoid(x):
        ex = ops.exp(ops.clamp(x, min=-20.0, max=20.0))
        return ex / (1.0 + ex)

    def tanh(x):
        e2x = ops.exp(ops.clamp(2.0 * x, min=-40.0, max=40.0))
        return (e2x - 1.0) / (e2x + 1.0)

    def dtanh(x, tanh_x):
        """Derivative of tanh: 1 - tanh(x)^2. Reuses precomputed tanh_x."""
        return 1.0 - tanh_x ** 2

    L0 = t[0]
    cP_raw = t[1]; aP_raw = t[2]
    cD_raw = t[3]; aD_raw = t[4]
    cR_raw = t[5]; aR_raw = t[6]

    cP = softplus(cP_raw); aP = softplus(aP_raw)
    cD = softplus(cD_raw); aD = softplus(aD_raw)
    cR = softplus(cR_raw); aR = softplus(aR_raw)

    sig_cP = sigmoid(cP_raw); sig_aP = sigmoid(aP_raw)
    sig_cD = sigmoid(cD_raw); sig_aD = sigmoid(aD_raw)
    sig_cR = sigmoid(cR_raw); sig_aR = sigmoid(aR_raw)

    exp_P = ops.exp(-aP[:, None] * p[None, :])
    exp_D = ops.exp(-aD[:, None] * s[None, :])
    sp_diff = s[None, :] - p[None, :]
    exp_R = ops.exp(-aR[:, None] * sp_diff)

    term_P = cP[:, None] * exp_P
    term_D = cD[:, None] * exp_D
    term_R = cR[:, None] * exp_R

    lr_opt = t[7][:, None] + t[8][:, None]*v[None, :] + t[9][:, None]*p[None, :] + t[10][:, None]*s[None, :]
    dev_lr = u[None, :] - lr_opt
    k_lr_raw = t[11]; a_lr_raw = t[12]
    k_lr = softplus(k_lr_raw)
    a_lr = tanh(a_lr_raw)
    sig_k_lr = sigmoid(k_lr_raw)
    dtanh_a_lr = dtanh(a_lr_raw, a_lr)  # 1 - tanh(a_lr_raw)^2

    tanh_dev_lr = tanh(dev_lr)
    dtanh_dev_lr = dtanh(dev_lr, tanh_dev_lr)
    lr_pen = k_lr[:, None] * dev_lr**2 * (1.0 + a_lr[:, None] * tanh_dev_lr)

    ns = u[None, :] - 0.5 * v[None, :]
    ns_opt = t[13][:, None] + t[14][:, None]*p[None, :] + t[15][:, None]*s[None, :]
    dev_ns = ns - ns_opt
    k_ns_raw = t[16]; a_ns_raw = t[17]
    k_ns = softplus(k_ns_raw)
    a_ns = tanh(a_ns_raw)
    sig_k_ns = sigmoid(k_ns_raw)
    dtanh_a_ns = dtanh(a_ns_raw, a_ns)

    tanh_dev_ns = tanh(dev_ns)
    dtanh_dev_ns = dtanh(dev_ns, tanh_dev_ns)
    ns_pen = k_ns[:, None] * dev_ns**2 * (1.0 + a_ns[:, None] * tanh_dev_ns)

    delta0 = t[18]
    k_dp_raw = t[19]
    k_dp = softplus(k_dp_raw)
    sig_k_dp = sigmoid(k_dp_raw)
    dp_diff = sp_diff - delta0[:, None]
    dp_pen = k_dp[:, None] * dp_diff**2

    pred = L0[:, None] + term_P + term_D + term_R + lr_pen + ns_pen + dp_pen

    # ---- Jacobian ----
    ones_BM = pred * 0.0 + 1.0

    # t[0] = L0
    d_0 = ones_BM
    # t[1] = cP (pre-softplus): d/dcP_raw = sig(cP_raw) * exp_P
    d_1 = sig_cP[:, None] * exp_P
    # t[2] = aP (pre-softplus): d/daP_raw = sig(aP_raw) * cP * (-p) * exp_P
    d_2 = sig_aP[:, None] * cP[:, None] * (-p[None, :]) * exp_P
    # t[3] = cD: d/dcD_raw = sig(cD_raw) * exp_D
    d_3 = sig_cD[:, None] * exp_D
    # t[4] = aD: d/daD_raw = sig(aD_raw) * cD * (-s) * exp_D
    d_4 = sig_aD[:, None] * cD[:, None] * (-s[None, :]) * exp_D
    # t[5] = cR: d/dcR_raw = sig(cR_raw) * exp_R
    d_5 = sig_cR[:, None] * exp_R
    # t[6] = aR: d/daR_raw = sig(aR_raw) * cR * (-(s-p)) * exp_R
    d_6 = sig_aR[:, None] * cR[:, None] * (-sp_diff) * exp_R

    # For lr_pen = k_lr * dev^2 * (1 + a_lr * tanh(dev))
    # Let h(dev) = dev^2 * (1 + a_lr * tanh(dev))
    # dh/d(dev) = 2*dev*(1 + a_lr*tanh(dev)) + dev^2*a_lr*(1-tanh(dev)^2)
    bracket_lr = 1.0 + a_lr[:, None] * tanh_dev_lr
    dh_ddev_lr = 2.0 * dev_lr * bracket_lr + dev_lr**2 * a_lr[:, None] * dtanh_dev_lr

    # t[7] = phi0: d(dev)/d(phi0) = -1
    d_7 = k_lr[:, None] * dh_ddev_lr * (-1.0)
    # t[8] = phi_b: d(dev)/d(phi_b) = -v
    d_8 = k_lr[:, None] * dh_ddev_lr * (-v[None, :])
    # t[9] = phi_p: d(dev)/d(phi_p) = -p
    d_9 = k_lr[:, None] * dh_ddev_lr * (-p[None, :])
    # t[10] = phi_d: d(dev)/d(phi_d) = -s
    d_10 = k_lr[:, None] * dh_ddev_lr * (-s[None, :])
    # t[11] = k_lr (pre-softplus): d/dk_lr_raw = sig(k_lr_raw) * dev^2 * bracket_lr
    d_11 = sig_k_lr[:, None] * dev_lr**2 * bracket_lr
    # t[12] = a_lr (pre-tanh): d/da_lr_raw = dtanh(a_lr_raw) * k_lr * dev^2 * tanh(dev)
    d_12 = dtanh_a_lr[:, None] * k_lr[:, None] * dev_lr**2 * tanh_dev_lr

    # For ns_pen = k_ns * dev_ns^2 * (1 + a_ns * tanh(dev_ns))
    bracket_ns = 1.0 + a_ns[:, None] * tanh_dev_ns
    dh_ddev_ns = 2.0 * dev_ns * bracket_ns + dev_ns**2 * a_ns[:, None] * dtanh_dev_ns

    # t[13] = psi0: d(dev_ns)/d(psi0) = -1
    d_13 = k_ns[:, None] * dh_ddev_ns * (-1.0)
    # t[14] = psi_p: d(dev_ns)/d(psi_p) = -p
    d_14 = k_ns[:, None] * dh_ddev_ns * (-p[None, :])
    # t[15] = psi_d: d(dev_ns)/d(psi_d) = -s
    d_15 = k_ns[:, None] * dh_ddev_ns * (-s[None, :])
    # t[16] = k_ns (pre-softplus): d/dk_ns_raw = sig(k_ns_raw) * dev_ns^2 * bracket_ns
    d_16 = sig_k_ns[:, None] * dev_ns**2 * bracket_ns
    # t[17] = a_ns (pre-tanh): d/da_ns_raw = dtanh(a_ns_raw) * k_ns * dev_ns^2 * tanh(dev_ns)
    d_17 = dtanh_a_ns[:, None] * k_ns[:, None] * dev_ns**2 * tanh_dev_ns

    # t[18] = delta0: dp_pen = k_dp * ((s-p) - delta0)^2
    # d/ddelta0 = k_dp * 2*((s-p)-delta0) * (-1) = -2*k_dp*dp_diff
    d_18 = -2.0 * k_dp[:, None] * dp_diff
    # t[19] = k_dp (pre-softplus): d/dk_dp_raw = sig(k_dp_raw) * dp_diff^2
    d_19 = sig_k_dp[:, None] * dp_diff**2

    jac = ops.stack([d_0, d_1, d_2, d_3, d_4, d_5, d_6,
                     d_7, d_8, d_9, d_10, d_11, d_12,
                     d_13, d_14, d_15, d_16, d_17, d_18, d_19], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac
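

# The tanh factor in sl_8 makes the penalty asymmetric: with a > 0,
# h(dev) = dev^2 * (1 + a*tanh(dev)) charges a same-sized deviation above the
# optimum more than one below it (e.g. overshooting the optimal lr costs more
# than undershooting). A tiny numeric illustration (the values are invented):
def _example_asymmetric_penalty():
    import numpy as np
    a, dev = 0.8, 1.5
    above = dev**2 * (1.0 + a * np.tanh(dev))
    below = dev**2 * (1.0 + a * np.tanh(-dev))
    assert above > below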


# sl_9 (15p): Direct poly2(log10) without exp transform
# x1=log10(lr), x2=log10(bsz), x3=log10(D), x4=log10(P)
# loss = c0 + c1*x1 + ... + c14*x3*x4
# theta: 15 coefficients
def sl_9(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    lr = ops.clamp_min(X[:, 0], _EPS)
    bsz = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    P = ops.clamp_min(X[:, 3], _EPS)
    log10_inv = 1.0 / xp.log(lr * 0.0 + 10.0)
    z0 = xp.log(lr) * log10_inv
    z1 = xp.log(bsz) * log10_inv
    z2 = xp.log(D) * log10_inv
    z3 = xp.log(P) * log10_inv
    ones = z0 * 0.0 + 1.0
    feat_list = [
        ones, z0, z1, z2, z3,
        z0*z0, z1*z1, z2*z2, z3*z3,
        z0*z1, z0*z2, z0*z3, z1*z2, z1*z3, z2*z3,
    ]
    if backend == "torch":
        features = xp.stack(feat_list, dim=-1)
        pred = xp.matmul(theta, features.T)
    else:
        features = xp.stack(feat_list, axis=-1)
        pred = theta @ features.T

    # Jacobian: pred = theta @ features.T (linear in theta)
    # d pred / d theta_i = features[:, i], broadcast to (B, M)
    # jac shape: (B, M, 15)
    B = theta.shape[0]
    M = features.shape[0]
    # features is (M, 15), broadcast to (B, M, 15)
    jac = xp.broadcast_to(features[None, :, :], (B, M, 15)) * 1.0

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac
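

# The log10_inv construction in sl_9 is just the change-of-base identity
# log10(x) = ln(x) / ln(10); writing the constant as `lr * 0.0 + 10.0` keeps
# it on the same backend, device, and dtype as the inputs.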


# sl_10 (18p): Direct poly2(log) + fixed-exponent power features
# loss = poly2(log(lr), log(bsz), log(D), log(P)) + w_D*D^(-0.5) + w_P*P^(-0.5) + w_bsz*bsz^(-1)
# theta: [c0..c14 (15 poly coeffs), w_D, w_P, w_bsz]
def sl_10(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    lr = ops.clamp_min(X[:, 0], _EPS)
    bsz = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    P = ops.clamp_min(X[:, 3], _EPS)
    z0 = xp.log(lr); z1 = xp.log(bsz); z2 = xp.log(D); z3 = xp.log(P)
    ones = z0 * 0.0 + 1.0
    feat_list = [
        ones, z0, z1, z2, z3,
        z0*z0, z1*z1, z2*z2, z3*z3,
        z0*z1, z0*z2, z0*z3, z1*z2, z1*z3, z2*z3,
    ]
    if backend == "torch":
        features = xp.stack(feat_list, dim=-1)
        poly = xp.matmul(theta[:, :15], features.T)
    else:
        features = xp.stack(feat_list, axis=-1)
        poly = theta[:, :15] @ features.T
    w_D = theta[:, 15]
    w_P = theta[:, 16]
    w_bsz = theta[:, 17]

    D_inv_sqrt = D[None, :] ** (-0.5)
    P_inv_sqrt = P[None, :] ** (-0.5)
    bsz_inv = 1.0 / bsz[None, :]

    power_terms = (w_D[:, None] * D_inv_sqrt
                   + w_P[:, None] * P_inv_sqrt
                   + w_bsz[:, None] * bsz_inv)
    pred = poly + power_terms

    # ---- Jacobian ----
    B = theta.shape[0]
    M = features.shape[0]

    # For c0..c14: d/dc_i = features[:, i], broadcast to (B, M)
    # For w_D: d/dw_D = D^(-0.5)
    # For w_P: d/dw_P = P^(-0.5)
    # For w_bsz: d/dw_bsz = 1/bsz
    ones_BM = pred * 0.0 + 1.0

    partials = []
    for i in range(15):
        # features[:, i] has shape (M,), broadcast to (B, M)
        partials.append(ones_BM * features[:, i][None, :])
    partials.append(D_inv_sqrt * ones_BM)
    partials.append(P_inv_sqrt * ones_BM)
    partials.append(bsz_inv * ones_BM)

    jac = ops.stack(partials, axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


PARAM_BOUNDS = {
    # Dataset: lr∈[1.2e-4,0.022], bsz∈[16,4096], D∈[2e9,1e11], P∈[6e7,1.07e9]
    # z0=log(lr)∈[-9,-4], z1=log(bsz)∈[3,8], z2=log(D)∈[21,25], z3=log(P)∈[18,21]
    # lm_loss∈[2.08,3.70], log(loss)∈[0.73,1.31], Δlog(loss)≈0.58

    # sl_1: 15p poly2 -> exp (NO clamp in model)
    # [bias, z0, z1, z2, z3, z0^2, z1^2, z2^2, z3^2, z0z1, z0z2, z0z3, z1z2, z1z3, z2z3]
    # poly output = log(loss) ∈ [0.73, 1.31]. Linear coeff bounds: |c|*Δz ≤ 0.58.
    # Δz0=5.2, Δz1=5.5, Δz2=3.9, Δz3=2.9 → max|c_linear|≤0.2. Quad/cross: |c|*Δ(z^2) ≤ 0.58.
    # Max Δ(z2^2)=183 → |c|≤0.003. Bias absorbs mean offsets: z2_mean*c2≈23*0.2=4.6 → bias∈(-15,15).
    # Fit: bias=6.4, linear max=0.25, quad/cross max=0.013.
    "sl_1": [(-15, 15)] + [(-0.35, 0.35)] * 4 + [(-0.025, 0.025)] * 10,

    # sl_2: 26p softplus-penalty model (all softplus/tanh-clamped internally)
    # [L_inf, Cp, ap, Cd, ad, Cdp, adp, k, u0, up, us, uv, v0, vp, vs,
    #  cL0, cLp, cLs, cB0, cBp, cBs, rho, cLv, cBv, Cbb, abb]
    # L_inf: irreducible loss ≈ 1.0. Cp/Cd/Cdp pre-softplus: fit found ~9-13 → allow up to 15.
    # ap/ad/adp/abb: rate params, pre-softplus.
    # k: mixing param for D/P interaction; k>3 risks overflow in exp(-adp*(s-k*p)) → restrict k≤3.
    # u0: optimal log(lr) offset; v0: optimal log(bsz) offset.
    # cL0,cB0: LR/BSZ curvature pre-softplus (fit found ~8-10); cLp,cBp,cLs,cBs,cLv,cBv: scaling.
    # rho: correlation pre-tanh; Cbb/abb: batch-size penalty.
    "sl_2": (
        [(0, 3)]  # L_inf
        + [(-5, 15), (-5, 5)] * 3  # Cp,ap, Cd,ad, Cdp,adp
        + [(-5, 3)]  # k (k≤3 prevents exp overflow in Cdp term)
        + [(-20, 2)]  # u0 (log(lr_opt)∈[-9,-4])
        + [(-3, 3)] * 3  # up, us, uv
        + [(0, 20)]  # v0 (log(bsz_opt)∈[3,8]; fit found 11.7)
        + [(-3, 3)] * 2  # vp, vs
        + [(-12, 12)] * 2  # cL0, cLp (fit found cL0=7.9, cLp=-9.2 near bounds)
        + [(-12, 12)] * 2  # cLs, cB0 (fit found cB0=8.0 near upper bound)
        + [(-12, 12)] * 2  # cBp, cBs
        + [(-8, 8)]  # rho (pre-tanh; fit found -4.9 near lower -5)
        + [(-12, 12)] * 2  # cLv, cBv
        + [(-5, 15), (-5, 5)]  # Cbb, abb
    ),

    # sl_3: 24p Chinchilla + LR/BSZ penalties
    # [E, A, alpha, B, beta, F, wN, wD,
    #  C0, CN, CD, CB, mu0, muN, muD, muB, muND,
    #  G0, GN, GD, nu0, nuN, nuD, nuND]
    # E: irreducible loss (fit: 1.74). A/B: Chinchilla amplitudes (fit: 113, 3285).
    # F: joint (N,D) amplitude (fit: 8.4e6, near upper 1e7 → expand to 2e7).
    # C0,G0: LR/BSZ curvature (fit: 0.14, 0.16).
    # mu0: log(lr_opt) intercept (fit: -7.4). nu0: log(bsz_opt) intercept (fit: 7.9).
    "sl_3": (
        [(0.5, 3)]  # E (fit: 1.74)
        + [(0, 5e5), (0.05, 2)] * 2  # A,alpha, B,beta
        + [(0, 2e7), (0.05, 2), (0.05, 2)]  # F, wN, wD
        + [(0, 3), (-3, 3), (-3, 3), (-3, 3)]  # C0, CN, CD, CB
        + [(-20, 5)]  # mu0 (log(lr_opt) intercept; fit: -7.4)
        + [(-10, 3)] * 4  # muN, muD, muB, muND
        + [(0, 3), (-3, 3), (-3, 3)]  # G0, GN, GD
        + [(0, 15)]  # nu0 (log(bsz_opt) intercept; fit: 7.9)
        + [(-3, 3)] * 3  # nuN, nuD, nuND
    ),

    # sl_4: 20p poly2+extras -> exp (NO clamp in model)
    # [15 poly coeffs, z2-z3, 1/bsz, 1/bsz^2, 1/D, 1/P]
    # Same poly as sl_1 for first 15 params. Extra features added inside the exponent.
    # 1/bsz∈[2.4e-4,0.0625]: |w|*0.062 ≤ 0.58 → |w|≤9.3; fit: -0.031.
    # 1/bsz^2∈[6e-8,3.9e-3]: |w|*3.9e-3 ≤ 0.58 → |w|≤149; fit: 3.08.
    # 1/D∈[1e-11,5e-10]: |w|*5e-10 ≤ 0.58 → |w|≤1.2e9; fit: ~0.
    # 1/P∈[9.3e-10,1.67e-8]: |w|*1.67e-8 ≤ 0.58 → |w|≤3.5e7; fit: ~0.
    "sl_4": (
        [(-15, 15)] + [(-0.35, 0.35)] * 4 + [(-0.025, 0.025)] * 10
        + [(-0.3, 0.3)]  # z2-z3 = log(D/P); fit: -0.093
        + [(-10, 10)]  # 1/bsz; fit: -0.031
        + [(-200, 200)]  # 1/bsz^2; fit: 3.08
        + [(-1.5e9, 1.5e9)]  # 1/D; fit: ~0
        + [(-5e7, 5e7)]  # 1/P; fit: ~0
    ),

    # sl_5: 19p Chinchilla + exp-decay + LR penalty
    # [L0, AN, aN, AD, aD, AB, aB, clr0, u0, kb, kn, kd, wb, wn, ws, AR, aR, AX, aX]
    # AN*exp(-aN*n): n=log(P)∈[18,21]; aN~0.26 → AN~110 for 0.5 contribution.
    # AD*exp(-aD*s): s=log(D)∈[21,25]; aD~0.52 → AD~7200 for 0.5 contribution.
    # AB*exp(-aB*v): v=log(bsz)∈[3,8]; contribution~AB*0.2; AB~1 at optimal.
    # AX*exp(-aX*(s-v)): s-v∈[14,22]; aX~0.49 → AX~535 for visible contribution.
    # clr0: LR curvature amplitude; u0: log(lr_opt) center.
    "sl_5": (
        [(0, 3)]  # L0 (fit: 1.5)
        + [(0, 2e4), (0.01, 2)]  # AN, aN (fit: 110, 0.26)
        + [(0, 2e4), (0.01, 2)]  # AD, aD (fit: 7202, 0.52)
        + [(0, 20), (0.01, 2)]  # AB, aB (fit: 1.03, 0.40)
        + [(0, 200)]  # clr0 (fit: 74.1)
        + [(-12, 3)]  # u0 (log(lr_opt) center; fit: -1.96)
        + [(-2, 2)] * 3  # kb, kn, kd
        + [(-2, 2)] * 3  # wb, wn, ws
        + [(0, 5), (0, 2)]  # AR, aR (fit: 0.27, 0.11)
        + [(0, 2000), (0, 2)]  # AX, aX (fit: 535, 0.49)
    ),

    # sl_6: 14p L_inf + exp(poly13), HAS clamp [-50,50] on exponent
    # [L_inf, w0, w_d, w_p, w_dp, w_lr, w_lr2, w_bsz, w_bsz2, w_lrbsz, w_lrD, w_lrP, w_bszD, w_bszP]
    # exp(poly) = lm_loss - L_inf ∈ (0, 1.7]; log of that ≤ 0.53. Same scale analysis as sl_1.
    # w_lr,w_bsz: up to 0.47 (z0/z1 range 5.2/5.5); w_dp,cross: small (quad range ~143→|c|≤0.004).
    # Fit (DE): L_inf=1.82, w0=-0.74, w_d=0.15, w_p=0.31, w_dp=-0.011, w_lr=0.47, w_lr2=0.052,
    # w_bsz=0.36, w_bsz2=0.034, w_lrbsz=-0.029, w_lrD=-0.007, w_lrP=0.027, w_bszD=-0.033, w_bszP=-0.007
    "sl_6": (
        [(0, 3)]  # L_inf
        + [(-8, 8)]  # w0 (bias of inner poly)
        + [(-0.5, 0.5)] * 2  # w_d, w_p (z2,z3 linear)
        + [(-0.025, 0.025)]  # w_dp (z2*z3 cross; Δ=143)
        + [(-0.6, 0.6)] * 2  # w_lr, w_lr2 (z0 linear and quad)
        + [(-0.5, 0.5)] * 2  # w_bsz, w_bsz2 (z1 linear and quad)
        + [(-0.04, 0.04)]  # w_lrbsz (z0*z1 cross; Δ=62.5)
        + [(-0.02, 0.02)]  # w_lrD (z0*z2; Δ=139)
        + [(-0.04, 0.04)]  # w_lrP (z0*z3; Δ=114)
        + [(-0.05, 0.05)]  # w_bszD (z1*z2; Δ=151)
        + [(-0.02, 0.02)]  # w_bszP (z1*z3; Δ=120)
    ),

    # sl_7: 31p E + 2*exp(poly2), HAS clamp [-50,50] on each exponent
    # [E, w1[0..14], w2[0..14]] — each poly2 has same structure as sl_1
    # Each exp term ≤ lm_loss-E ≤ 1.7; same coefficient analysis as sl_1.
    # Quad/cross bounds expanded to (-0.03,0.03) since some coefficients found near ±0.02.
    # Fit: E=1.75, w1_bias=4.26, w2_bias=3.17; quad coeffs up to ±0.02.
    "sl_7": (
        [(0, 2.5)]  # E
        + [(-15, 15)] + [(-0.35, 0.35)] * 4 + [(-0.03, 0.03)] * 10  # w1
        + [(-15, 15)] + [(-0.35, 0.35)] * 4 + [(-0.03, 0.03)] * 10  # w2
    ),

    # sl_8: 20p softplus/tanh model (all internally clamped via softplus/tanh)
    # [L0, cP, aP, cD, aD, cR, aR, phi0, phi_b, phi_p, phi_d,
    #  k_lr, a_lr, psi0, psi_p, psi_d, k_ns, a_ns, delta0, k_dp]
    # All cP,cD,cR,k_lr,k_ns,k_dp pre-softplus; aP,aD,aR pre-softplus (positive rates).
    # phi0: log(lr_opt) intercept (fit: -14.1); psi0: ns_opt intercept (fit: -1.4).
    # delta0: (log(D/P)) offset; s-p∈[0.6,6.1]; fit: 6.8.
    # All fit values within bounds; no expansion needed.
    "sl_8": (
        [(-1, 3)]  # L0 (fit: -0.44)
        + [(-3, 12), (-5, 5)] * 3  # cP,aP, cD,aD, cR,aR (pre-softplus)
        + [(-20, 3)]  # phi0 (fit: -14.1)
        + [(-5, 5)] * 3  # phi_b, phi_p, phi_d
        + [(-5, 10)]  # k_lr (pre-softplus; fit: -4.9)
        + [(-5, 5)]  # a_lr (pre-tanh; fit: 3.5)
        + [(-15, 5)]  # psi0 (ns_opt intercept; fit: -1.4)
        + [(-5, 5)] * 2  # psi_p, psi_d
        + [(-5, 10)]  # k_ns (pre-softplus; fit: 0.74)
        + [(-5, 5)]  # a_ns (pre-tanh; fit: 4.2)
        + [(-5, 10)]  # delta0 (s-p offset; fit: 6.8)
        + [(-5, 10)]  # k_dp (pre-softplus; fit: -4.9)
    ),

    # sl_9: 15p direct poly2(log10), no exp transform
    # [c0, c_lr, c_bsz, c_D, c_P, c_lr2, c_bsz2, c_D2, c_P2, c_lr_bsz..c_D_P]
    # log10: z0∈[-3.91,-1.66], z1∈[1.20,3.61], z2∈[9.30,11.0], z3∈[7.78,9.03]
    # loss ∈ [2.08, 3.70], Δ=1.62. Linear: |c|*Δz ≤ 1.62 → |c_D|≤1.62/1.7=0.95.
    # Quad: |c|*Δ(z2^2)=|c|*34.5 ≤ 1.62 → |c|≤0.047. Cross: Δ(z2*z3)=27 → |c|≤0.06.
    # Bias must absorb z3 contribution at mean: c_P*z3_mean≈-0.31*8.4=-2.6. Fit: bias=16.09.
    # Fit: c0=16.09, c_D=-2.06, c_P=-0.31, quad max=0.14, cross max=0.13.
    "sl_9": (
        [(-5, 30)]  # c0 (bias; fit: 16.09)
        + [(-4, 4)] * 4  # c_lr, c_bsz, c_D, c_P (fit max: |c_D|=2.06)
        + [(-0.3, 0.3)] * 10  # quad + cross coefficients (fit max: 0.14)
    ),

    # sl_10: 18p direct poly2(log) + power features, no exp
    # [c0..c14, w_D, w_P, w_bsz] (natural log, not log10)
    # Natural log: z0∈[-9,-4], z1∈[3,8], z2∈[21,25], z3∈[18,21]
    # Δz3=2.9 → max|c_P|≤1.62/2.9=0.56. Quad: Δ(z2^2)=183 → |c|≤0.009.
    # Bias absorbs z3 contribution: at mean z3=19.3, c3=-1.34 → 19.3*(-1.34)=-25.9; bias∈(-5,35).
    # Power: D^(-0.5)∈[3.2e-6,2.2e-5], Δ=1.92e-5 → |w_D|≤1.62/1.92e-5=84375; fit: 1.68e4.
    # P^(-0.5)∈[3.1e-5,1.3e-4], Δ=9.9e-5 → |w_P|≤1.62/9.9e-5=16364; fit: -3469.
    # 1/bsz∈[2.4e-4,0.0625], Δ=0.062 → |w_bsz|≤26; fit: 1.0.
    "sl_10": (
        [(-5, 35)]  # c0 (bias; fit: 16.27)
        + [(-2, 2)] * 4  # c_lr, c_bsz, c_D, c_P (fit: c_P=-1.34)
        + [(-0.1, 0.1)] * 10  # quad + cross (fit max: 0.044)
        + [(-5e4, 5e4)]  # w_D (D^-0.5; fit: 1.68e4)
        + [(-2e4, 2e4)]  # w_P (P^-0.5; fit: -3469)
        + [(-15, 15)]  # w_bsz (1/bsz; fit: 1.0)
    ),
}


LAW_REGISTRY = {
    "sl_1": sl_1, "sl_2": sl_2, "sl_3": sl_3, "sl_4": sl_4, "sl_5": sl_5,
    "sl_6": sl_6, "sl_7": sl_7, "sl_8": sl_8, "sl_9": sl_9, "sl_10": sl_10,
}
PARAM_COUNTS = {
    "sl_1": 15, "sl_2": 26, "sl_3": 24, "sl_4": 20, "sl_5": 19,
    "sl_6": 14, "sl_7": 31, "sl_8": 20, "sl_9": 15, "sl_10": 18,
}
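

# A minimal usage sketch for the registry above: evaluate every law at the
# midpoint of its bounds on a single invented point and check that the
# Jacobian width matches PARAM_COUNTS (numpy backend assumed):
def _example_registry_roundtrip():
    import numpy as np
    X = np.array([[3e-4, 256.0, 1e10, 3e8]])  # [lr, bsz, D, P]
    for name, law in LAW_REGISTRY.items():
        bounds = PARAM_BOUNDS[name]
        assert len(bounds) == PARAM_COUNTS[name]
        theta = np.array([[(lo + hi) / 2.0 for lo, hi in bounds]])
        pred, jac = law(theta, X, backend="numpy")
        assert pred.shape == (1,)
        assert jac.shape == (1, PARAM_COUNTS[name])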

lr_bsz_scaling_law/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ed6c33d124562015805d3715f9f01a58e5d27befcd63d30be23a94eedc9caa68
size 3944

lr_bsz_scaling_law/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3e8bf513e7c7da2b82e3d324c19643256e3f5cc5770aa4d0dcbc0aa3c38768e5
size 35166

moe_scaling_law/laws.py
ADDED
@@ -0,0 +1,622 @@
"""Scaling laws for Mixture-of-Experts models.

X columns: [num_experts (E), dense_parameter_count (N)]
"""

from typing import Literal

import benchmark.dataset.utils as utils

_EPS = 1e-12


# Scaling law 1 (4 params):
# L_inf + B / (N^alpha * E^beta)
# theta: [L_inf, B, alpha, beta]
def sl_1(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    E = ops.clamp_min(X[:, 0], _EPS)
    N = ops.clamp_min(X[:, 1], _EPS)

    L_inf = theta[:, 0]
    B = theta[:, 1]
    alpha = theta[:, 2]
    beta = theta[:, 3]

    logN = xp.log(ops.clamp_min(N, _EPS))
    logE = xp.log(ops.clamp_min(E, _EPS))

    denom = (N[None, :] ** alpha[:, None]) * (E[None, :] ** beta[:, None])
    denom = ops.clamp_min(denom, _EPS)

    frac = B[:, None] / denom  # B / (N^alpha * E^beta)
    pred = L_inf[:, None] + frac

    ones = pred * 0.0 + 1.0
    # d/d(L_inf) = 1
    d_L_inf = ones
    # d/d(B) = 1 / denom (computed directly so it stays finite when B == 0)
    d_B = 1.0 / denom
    # d/d(alpha) = -B / denom * log(N) = -frac * log(N)
    d_alpha = -frac * logN[None, :]
    # d/d(beta) = -frac * log(E)
    d_beta = -frac * logE[None, :]

    jac = ops.stack([d_L_inf, d_B, d_alpha, d_beta], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac
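

# Minimal usage sketch (the parameter values are invented): with alpha,
# beta > 0 the predicted loss falls as either the dense size N or the expert
# count E grows.
def _example_sl_1_monotonicity():
    import numpy as np
    theta = np.array([[0.7, 5.0, 0.1, 0.05]])      # [L_inf, B, alpha, beta]
    X = np.array([[8, 1e8], [64, 1e8], [8, 1e9]])  # [E, N]
    pred, _ = sl_1(theta, X, backend="numpy")
    assert pred[1] < pred[0] and pred[2] < pred[0]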


# Scaling law 2 (5 params):
# L + K * (N^alpha * E^beta)^(-gamma)
# theta: [L, K, alpha, beta, gamma]
def sl_2(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    E = ops.clamp_min(X[:, 0], _EPS)
    N = ops.clamp_min(X[:, 1], _EPS)

    L = theta[:, 0]
    K = theta[:, 1]
    alpha = theta[:, 2]
    beta = theta[:, 3]
    gamma = theta[:, 4]

    logN = xp.log(ops.clamp_min(N, _EPS))
    logE = xp.log(ops.clamp_min(E, _EPS))

    base = (N[None, :] ** alpha[:, None]) * (E[None, :] ** beta[:, None])
    base = ops.clamp_min(base, _EPS)

    power_term = base ** (-gamma[:, None])  # (N^a * E^b)^(-g)
    term = K[:, None] * power_term
    pred = L[:, None] + term

    ones = pred * 0.0 + 1.0
    # log(base) = alpha*log(N) + beta*log(E)
    log_base = alpha[:, None] * logN[None, :] + beta[:, None] * logE[None, :]

    # d/dL = 1
    d_L = ones
    # d/dK = power_term
    d_K = power_term
    # d/d(alpha) = K * power_term * (-gamma) * log(N) = term * (-gamma) * log(N)
    d_alpha = term * (-gamma[:, None]) * logN[None, :]
    # d/d(beta) = term * (-gamma) * log(E)
    d_beta = term * (-gamma[:, None]) * logE[None, :]
    # d/d(gamma) = K * power_term * (-log(base)) = term * (-log_base)
    d_gamma = term * (-log_base)

    jac = ops.stack([d_L, d_K, d_alpha, d_beta, d_gamma], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac
# Scaling law 3 (6 params):
|
| 107 |
+
# A * P^alpha / (1 + B * E^beta) + C * P^(alpha*0.6) + D
|
| 108 |
+
# (gamma = alpha * 0.6 is hard-coded)
|
| 109 |
+
# theta: [A, alpha, B, beta, C, D]
|
| 110 |
+
def sl_3(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
|
| 111 |
+
ops = utils.get_ops(backend)
|
| 112 |
+
xp = ops.xp
|
| 113 |
+
X = ops.asarray(X, atleast_2d=True)
|
| 114 |
+
theta = ops.asarray(theta, atleast_2d=True)
|
| 115 |
+
|
| 116 |
+
E = ops.clamp_min(X[:, 0], _EPS)
|
| 117 |
+
N = ops.clamp_min(X[:, 1], _EPS)
|
| 118 |
+
|
| 119 |
+
A = theta[:, 0]
|
| 120 |
+
alpha = theta[:, 1]
|
| 121 |
+
B = theta[:, 2]
|
| 122 |
+
beta = theta[:, 3]
|
| 123 |
+
C = theta[:, 4]
|
| 124 |
+
D = theta[:, 5]
|
| 125 |
+
|
| 126 |
+
logN = xp.log(ops.clamp_min(N, _EPS))
|
| 127 |
+
logE = xp.log(ops.clamp_min(E, _EPS))
|
| 128 |
+
|
| 129 |
+
N_alpha = N[None, :] ** alpha[:, None] # (B_t, M)
|
| 130 |
+
E_beta = E[None, :] ** beta[:, None] # (B_t, M)
|
| 131 |
+
denom = 1.0 + B[:, None] * E_beta # (B_t, M)
|
| 132 |
+
efficiency = A[:, None] * N_alpha / denom # term1
|
| 133 |
+
|
| 134 |
+
gamma_val = alpha[:, None] * 0.6
|
| 135 |
+
N_gamma = N[None, :] ** gamma_val # (B_t, M)
|
| 136 |
+
param_scale = C[:, None] * N_gamma # term2
|
| 137 |
+
|
| 138 |
+
pred = efficiency + param_scale + D[:, None]
|
| 139 |
+
|
| 140 |
+
ones = pred * 0.0 + 1.0
|
| 141 |
+
|
| 142 |
+
# d/dA = N^alpha / denom
|
| 143 |
+
d_A = N_alpha / denom
|
| 144 |
+
# d/d(alpha):
|
| 145 |
+
# d(efficiency)/d(alpha) = A * N^alpha * log(N) / denom = efficiency * log(N)
|
| 146 |
+
# d(param_scale)/d(alpha) = C * N^(alpha*0.6) * 0.6 * log(N) = param_scale * 0.6 * log(N)
|
| 147 |
+
d_alpha = efficiency * logN[None, :] + param_scale * 0.6 * logN[None, :]
|
| 148 |
+
# d/dB = -A * N^alpha * E^beta / denom^2 = -efficiency * E^beta / denom
|
| 149 |
+
d_B = -efficiency * E_beta / denom
|
| 150 |
+
# d/d(beta) = -A * N^alpha * B * E^beta * log(E) / denom^2
|
| 151 |
+
# = -efficiency * B[:, None] * E_beta * log(E) / denom
|
| 152 |
+
d_beta = -efficiency * B[:, None] * E_beta * logE[None, :] / denom
|
| 153 |
+
# d/dC = N^(alpha*0.6)
|
| 154 |
+
d_C = N_gamma
|
| 155 |
+
# d/dD = 1
|
| 156 |
+
d_D = ones
|
| 157 |
+
|
| 158 |
+
jac = ops.stack([d_A, d_alpha, d_B, d_beta, d_C, d_D], axis=-1)
|
| 159 |
+
|
| 160 |
+
if pred.shape[0] == 1:
|
| 161 |
+
return pred[0], jac[0]
|
| 162 |
+
return pred, jac
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
# Scaling law 4 (6 params):
|
| 166 |
+
# a / (N^alpha * (1 + b*E)^gamma) + c + d*(log(N) - 0.4*log(1+E))
|
| 167 |
+
# theta: [a, alpha, b, gamma, c, d]
|
| 168 |
+
def sl_4(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
|
| 169 |
+
ops = utils.get_ops(backend)
|
| 170 |
+
xp = ops.xp
|
| 171 |
+
X = ops.asarray(X, atleast_2d=True)
|
| 172 |
+
theta = ops.asarray(theta, atleast_2d=True)
|
| 173 |
+
|
| 174 |
+
E = ops.clamp_min(X[:, 0], 1.0)
|
| 175 |
+
N = ops.clamp_min(X[:, 1], _EPS)
|
| 176 |
+
|
| 177 |
+
a = theta[:, 0]
|
| 178 |
+
alpha = theta[:, 1]
|
| 179 |
+
b = theta[:, 2]
|
| 180 |
+
gamma = theta[:, 3]
|
| 181 |
+
c = theta[:, 4]
|
| 182 |
+
d = theta[:, 5]
|
| 183 |
+
|
| 184 |
+
logN = xp.log(N)
|
| 185 |
+
log1E = xp.log(1.0 + E)
|
| 186 |
+
|
| 187 |
+
N_alpha = N[None, :] ** alpha[:, None]
|
| 188 |
+
bE_term = 1.0 + b[:, None] * E[None, :] # (B_t, M)
|
| 189 |
+
bE_term_safe = ops.clamp_min(bE_term, _EPS)
|
| 190 |
+
expert_sat = bE_term_safe ** gamma[:, None]
|
| 191 |
+
expert_sat = ops.clamp_min(expert_sat, _EPS)
|
| 192 |
+
|
| 193 |
+
main = a[:, None] / (N_alpha * expert_sat) # a / (N^alpha * (1+bE)^gamma)
|
| 194 |
+
|
| 195 |
+
log_correction = d[:, None] * (logN[None, :] - 0.4 * log1E[None, :])
|
| 196 |
+
|
| 197 |
+
pred = main + c[:, None] + log_correction
|
| 198 |
+
|
| 199 |
+
ones = pred * 0.0 + 1.0
|
| 200 |
+
|
| 201 |
+
# d/da = 1 / (N^alpha * expert_sat) = main / a[:, None]
|
| 202 |
+
d_a = main / a[:, None]
|
| 203 |
+
# d/d(alpha) = -main * log(N)
|
| 204 |
+
d_alpha = -main * logN[None, :]
|
| 205 |
+
# d/db = -a * gamma * E * (1+bE)^(gamma-1) / (N^alpha * (1+bE)^(2*gamma))
|
| 206 |
+
# = -main * gamma * E / (1+bE)
|
| 207 |
+
# Since main = a / (N^a * (1+bE)^g), and d/db of (1+bE)^g = g*E*(1+bE)^(g-1)
|
| 208 |
+
# d(main)/db = -a * g * E * (1+bE)^(g-1) / (N^a * ((1+bE)^g)^2)
|
| 209 |
+
# but (1+bE)^(g-1) / ((1+bE)^g)^2 = 1/((1+bE)^(g+1))
|
| 210 |
+
# Simpler: main = a * (N^alpha * (1+bE)^gamma)^(-1)
|
| 211 |
+
# d/db = -main * gamma * E / (1+bE)
|
| 212 |
+
d_b = -main * gamma[:, None] * E[None, :] / bE_term_safe
|
| 213 |
+
# d/d(gamma) = -main * log(1+bE)
|
| 214 |
+
log_bE_term = xp.log(ops.clamp_min(bE_term_safe, _EPS))
|
| 215 |
+
d_gamma = -main * log_bE_term
|
| 216 |
+
# d/dc = 1
|
| 217 |
+
d_c = ones
|
| 218 |
+
# d/dd = log(N) - 0.4*log(1+E)
|
| 219 |
+
d_d = logN[None, :] - 0.4 * log1E[None, :]
|
| 220 |
+
# broadcast d_d to (B_t, M)
|
| 221 |
+
d_d = d_d + ones * 0.0
|
| 222 |
+
|
| 223 |
+
jac = ops.stack([d_a, d_alpha, d_b, d_gamma, d_c, d_d], axis=-1)
|
| 224 |
+
|
| 225 |
+
if pred.shape[0] == 1:
|
| 226 |
+
return pred[0], jac[0]
|
| 227 |
+
return pred, jac
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
# Scaling law 5 (6 params):
|
| 231 |
+
# p0 + exp(p1 + p2*log(E) + p3*log(P) + p4*log(E)*log(P)) + p5*log(E)
|
| 232 |
+
# theta: [p0, p1, p2, p3, p4, p5]
|
| 233 |
+
def sl_5(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
|
| 234 |
+
ops = utils.get_ops(backend)
|
| 235 |
+
xp = ops.xp
|
| 236 |
+
X = ops.asarray(X, atleast_2d=True)
|
| 237 |
+
theta = ops.asarray(theta, atleast_2d=True)
|
| 238 |
+
|
| 239 |
+
E = ops.clamp_min(X[:, 0], 1.0)
|
| 240 |
+
N = ops.clamp_min(X[:, 1], _EPS)
|
| 241 |
+
|
| 242 |
+
p0 = theta[:, 0]
|
| 243 |
+
p1 = theta[:, 1]
|
| 244 |
+
p2 = theta[:, 2]
|
| 245 |
+
p3 = theta[:, 3]
|
| 246 |
+
p4 = theta[:, 4]
|
| 247 |
+
p5 = theta[:, 5]
|
| 248 |
+
|
| 249 |
+
log_E = xp.log(E)[None, :] # (1, M)
|
| 250 |
+
log_N = xp.log(N)[None, :] # (1, M)
|
| 251 |
+
|
| 252 |
+
exponent = (
|
| 253 |
+
p1[:, None]
|
| 254 |
+
+ p2[:, None] * log_E
|
| 255 |
+
+ p3[:, None] * log_N
|
| 256 |
+
+ p4[:, None] * log_E * log_N
|
| 257 |
+
)
|
| 258 |
+
# Clip exponent for numerical safety
|
| 259 |
+
exponent = ops.clamp(exponent, min=-50.0, max=50.0)
|
| 260 |
+
|
| 261 |
+
exp_val = ops.exp(exponent) # (B_t, M)
|
| 262 |
+
|
| 263 |
+
pred = p0[:, None] + exp_val + p5[:, None] * log_E
|
| 264 |
+
|
| 265 |
+
ones = pred * 0.0 + 1.0
|
| 266 |
+
|
| 267 |
+
# d/d(p0) = 1
|
| 268 |
+
d_p0 = ones
|
| 269 |
+
# d/d(p1) = exp_val * 1 = exp_val
|
| 270 |
+
d_p1 = exp_val
|
| 271 |
+
# d/d(p2) = exp_val * log_E
|
| 272 |
+
d_p2 = exp_val * log_E
|
| 273 |
+
# d/d(p3) = exp_val * log_N
|
| 274 |
+
d_p3 = exp_val * log_N
|
| 275 |
+
# d/d(p4) = exp_val * log_E * log_N
|
| 276 |
+
d_p4 = exp_val * log_E * log_N
|
| 277 |
+
# d/d(p5) = log_E
|
| 278 |
+
d_p5 = log_E + ones * 0.0 # broadcast to (B_t, M)
|
| 279 |
+
|
| 280 |
+
jac = ops.stack([d_p0, d_p1, d_p2, d_p3, d_p4, d_p5], axis=-1)
|
| 281 |
+
|
| 282 |
+
if pred.shape[0] == 1:
|
| 283 |
+
return pred[0], jac[0]
|
| 284 |
+
return pred, jac
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
# Scaling law 6 (6 params):
|
| 288 |
+
# a * N^(-b) * (1 + c*E^(-d)) + e + f/(E * N^0.05)
|
| 289 |
+
# theta: [a, b, c, d, e, f]
|
| 290 |
+
def sl_6(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
|
| 291 |
+
ops = utils.get_ops(backend)
|
| 292 |
+
xp = ops.xp
|
| 293 |
+
X = ops.asarray(X, atleast_2d=True)
|
| 294 |
+
theta = ops.asarray(theta, atleast_2d=True)
|
| 295 |
+
|
| 296 |
+
E = ops.clamp_min(X[:, 0], 1.0)
|
| 297 |
+
N = ops.clamp_min(X[:, 1], _EPS)
|
| 298 |
+
|
| 299 |
+
a = theta[:, 0]
|
| 300 |
+
b = theta[:, 1]
|
| 301 |
+
c = theta[:, 2]
|
| 302 |
+
d = theta[:, 3]
|
| 303 |
+
e = theta[:, 4]
|
| 304 |
+
f = theta[:, 5]
|
| 305 |
+
|
| 306 |
+
logN = xp.log(ops.clamp_min(N, _EPS))
|
| 307 |
+
logE = xp.log(ops.clamp_min(E, _EPS))
|
| 308 |
+
|
| 309 |
+
N_neg_b = N[None, :] ** (-b[:, None]) # (B_t, M)
|
| 310 |
+
E_neg_d = E[None, :] ** (-d[:, None]) # (B_t, M)
|
| 311 |
+
expert_mod = 1.0 + c[:, None] * E_neg_d # (B_t, M)
|
| 312 |
+
base = a[:, None] * N_neg_b # a * N^(-b)
|
| 313 |
+
term1 = base * expert_mod # a * N^(-b) * (1 + c*E^(-d))
|
| 314 |
+
interaction = f[:, None] / (E[None, :] * (N[None, :] ** 0.05)) # f/(E*N^0.05)
|
| 315 |
+
|
| 316 |
+
pred = term1 + e[:, None] + interaction
|
| 317 |
+
|
| 318 |
+
ones = pred * 0.0 + 1.0
|
| 319 |
+
|
| 320 |
+
# d/da = N^(-b) * expert_mod
|
| 321 |
+
d_a = N_neg_b * expert_mod
|
| 322 |
+
# d/db = a * N^(-b) * (-log(N)) * expert_mod = -term1 * log(N)
|
| 323 |
+
d_b = -term1 * logN[None, :]
|
| 324 |
+
# d/dc = a * N^(-b) * E^(-d) = base * E_neg_d
|
| 325 |
+
d_c = base * E_neg_d
|
| 326 |
+
# d/dd = a * N^(-b) * c * E^(-d) * (-log(E)) = -base * c[:, None] * E_neg_d * log(E)
|
| 327 |
+
d_d = -base * c[:, None] * E_neg_d * logE[None, :]
|
| 328 |
+
# d/de = 1
|
| 329 |
+
d_e = ones
|
| 330 |
+
# d/df = 1 / (E * N^0.05)
|
| 331 |
+
d_f = 1.0 / (E[None, :] * (N[None, :] ** 0.05))
|
| 332 |
+
d_f = d_f + ones * 0.0 # ensure broadcast
|
| 333 |
+
|
| 334 |
+
jac = ops.stack([d_a, d_b, d_c, d_d, d_e, d_f], axis=-1)
|
| 335 |
+
|
| 336 |
+
if pred.shape[0] == 1:
|
| 337 |
+
return pred[0], jac[0]
|
| 338 |
+
return pred, jac
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
# Scaling law 7 (6 params):
|
| 342 |
+
# p0 * E^p1 * P^p2 + p3 * P^p4 + p5
|
| 343 |
+
# (multiplicative + additive power law)
|
| 344 |
+
# theta: [p0, p1, p2, p3, p4, p5]
|
| 345 |
+
def sl_7(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
|
| 346 |
+
ops = utils.get_ops(backend)
|
| 347 |
+
xp = ops.xp
|
| 348 |
+
X = ops.asarray(X, atleast_2d=True)
|
| 349 |
+
theta = ops.asarray(theta, atleast_2d=True)
|
| 350 |
+
|
| 351 |
+
E = ops.clamp_min(X[:, 0], _EPS)
|
| 352 |
+
N = ops.clamp_min(X[:, 1], _EPS)
|
| 353 |
+
|
| 354 |
+
p0 = theta[:, 0]
|
| 355 |
+
p1 = theta[:, 1]
|
| 356 |
+
p2 = theta[:, 2]
|
| 357 |
+
p3 = theta[:, 3]
|
| 358 |
+
p4 = theta[:, 4]
|
| 359 |
+
p5 = theta[:, 5]
|
| 360 |
+
|
| 361 |
+
logE = xp.log(ops.clamp_min(E, _EPS))
|
| 362 |
+
logN = xp.log(ops.clamp_min(N, _EPS))
|
| 363 |
+
|
| 364 |
+
E_p1 = E[None, :] ** p1[:, None]
|
| 365 |
+
N_p2 = N[None, :] ** p2[:, None]
|
| 366 |
+
N_p4 = N[None, :] ** p4[:, None]
|
| 367 |
+
|
| 368 |
+
term1 = p0[:, None] * E_p1 * N_p2 # p0 * E^p1 * N^p2
|
| 369 |
+
term2 = p3[:, None] * N_p4 # p3 * N^p4
|
| 370 |
+
|
| 371 |
+
pred = term1 + term2 + p5[:, None]
|
| 372 |
+
|
| 373 |
+
ones = pred * 0.0 + 1.0
|
| 374 |
+
|
| 375 |
+
# d/d(p0) = E^p1 * N^p2
|
| 376 |
+
d_p0 = E_p1 * N_p2
|
| 377 |
+
# d/d(p1) = term1 * log(E)
|
| 378 |
+
d_p1 = term1 * logE[None, :]
|
| 379 |
+
# d/d(p2) = term1 * log(N)
|
| 380 |
+
d_p2 = term1 * logN[None, :]
|
| 381 |
+
# d/d(p3) = N^p4
|
| 382 |
+
d_p3 = N_p4
|
| 383 |
+
# d/d(p4) = term2 * log(N)
|
| 384 |
+
d_p4 = term2 * logN[None, :]
|
| 385 |
+
# d/d(p5) = 1
|
| 386 |
+
d_p5 = ones
|
| 387 |
+
|
| 388 |
+
jac = ops.stack([d_p0, d_p1, d_p2, d_p3, d_p4, d_p5], axis=-1)
|
| 389 |
+
|
| 390 |
+
if pred.shape[0] == 1:
|
| 391 |
+
return pred[0], jac[0]
|
| 392 |
+
return pred, jac
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
# Scaling law 8 (4 params):
|
| 396 |
+
# a * N^b * E^c + d
|
| 397 |
+
# theta: [a, b, c, d]
|
| 398 |
+
def sl_8(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
|
| 399 |
+
ops = utils.get_ops(backend)
|
| 400 |
+
xp = ops.xp
|
| 401 |
+
X = ops.asarray(X, atleast_2d=True)
|
| 402 |
+
theta = ops.asarray(theta, atleast_2d=True)
|
| 403 |
+
|
| 404 |
+
E = ops.clamp_min(X[:, 0], _EPS)
|
| 405 |
+
N = ops.clamp_min(X[:, 1], _EPS)
|
| 406 |
+
|
| 407 |
+
a = theta[:, 0]
|
| 408 |
+
b = theta[:, 1]
|
| 409 |
+
c = theta[:, 2]
|
| 410 |
+
d = theta[:, 3]
|
| 411 |
+
|
| 412 |
+
logN = xp.log(ops.clamp_min(N, _EPS))
|
| 413 |
+
logE = xp.log(ops.clamp_min(E, _EPS))
|
| 414 |
+
|
| 415 |
+
N_b = N[None, :] ** b[:, None]
|
| 416 |
+
E_c = E[None, :] ** c[:, None]
|
| 417 |
+
term = a[:, None] * N_b * E_c # a * N^b * E^c
|
| 418 |
+
|
| 419 |
+
pred = term + d[:, None]
|
| 420 |
+
|
| 421 |
+
ones = pred * 0.0 + 1.0
|
| 422 |
+
|
| 423 |
+
# d/da = N^b * E^c
|
| 424 |
+
d_a = N_b * E_c
|
| 425 |
+
# d/db = term * log(N)
|
| 426 |
+
d_b = term * logN[None, :]
|
| 427 |
+
# d/dc = term * log(E)
|
| 428 |
+
d_c = term * logE[None, :]
|
| 429 |
+
# d/dd = 1
|
| 430 |
+
d_d = ones
|
| 431 |
+
|
| 432 |
+
jac = ops.stack([d_a, d_b, d_c, d_d], axis=-1)
|
| 433 |
+
|
| 434 |
+
if pred.shape[0] == 1:
|
| 435 |
+
return pred[0], jac[0]
|
| 436 |
+
return pred, jac
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
# Scaling law 9 (4 params):
|
| 440 |
+
# c0 + A * (N * E^g)^(-a)
|
| 441 |
+
# theta: [c0, A, g, a]
|
| 442 |
+
def sl_9(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
|
| 443 |
+
ops = utils.get_ops(backend)
|
| 444 |
+
xp = ops.xp
|
| 445 |
+
X = ops.asarray(X, atleast_2d=True)
|
| 446 |
+
theta = ops.asarray(theta, atleast_2d=True)
|
| 447 |
+
|
| 448 |
+
E = ops.clamp_min(X[:, 0], _EPS)
|
| 449 |
+
N = ops.clamp_min(X[:, 1], _EPS)
|
| 450 |
+
|
| 451 |
+
c0 = theta[:, 0]
|
| 452 |
+
A = theta[:, 1]
|
| 453 |
+
g = theta[:, 2]
|
| 454 |
+
a = theta[:, 3]
|
| 455 |
+
|
| 456 |
+
logN = xp.log(ops.clamp_min(N, _EPS))
|
| 457 |
+
logE = xp.log(ops.clamp_min(E, _EPS))
|
| 458 |
+
|
| 459 |
+
N_eff = N[None, :] * (E[None, :] ** g[:, None])
|
| 460 |
+
N_eff = ops.clamp_min(N_eff, _EPS)
|
| 461 |
+
|
| 462 |
+
power_term = N_eff ** (-a[:, None]) # (N*E^g)^(-a)
|
| 463 |
+
term = A[:, None] * power_term
|
| 464 |
+
|
| 465 |
+
pred = c0[:, None] + term
|
| 466 |
+
|
| 467 |
+
ones = pred * 0.0 + 1.0
|
| 468 |
+
|
| 469 |
+
# log(N_eff) = log(N) + g*log(E)
|
| 470 |
+
log_N_eff = logN[None, :] + g[:, None] * logE[None, :]
|
| 471 |
+
|
| 472 |
+
# d/d(c0) = 1
|
| 473 |
+
d_c0 = ones
|
| 474 |
+
# d/d(A) = power_term
|
| 475 |
+
d_A = power_term
|
| 476 |
+
# d/d(g) = A * power_term * (-a) * log(E) = term * (-a) * log(E)
|
| 477 |
+
# since d/dg of N_eff^(-a) = (-a) * N_eff^(-a) * d(log N_eff)/dg
|
| 478 |
+
# and d(log N_eff)/dg = log(E)
|
| 479 |
+
d_g = term * (-a[:, None]) * logE[None, :]
|
| 480 |
+
# d/d(a) = A * power_term * (-log(N_eff)) = term * (-log_N_eff)
|
| 481 |
+
d_a = term * (-log_N_eff)
|
| 482 |
+
|
| 483 |
+
jac = ops.stack([d_c0, d_A, d_g, d_a], axis=-1)
|
| 484 |
+
|
| 485 |
+
if pred.shape[0] == 1:
|
| 486 |
+
return pred[0], jac[0]
|
| 487 |
+
return pred, jac
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
# Scaling law 10 (6 params):
|
| 491 |
+
# bias + A * (N/1e9)^(-alpha) * ((1 + B*E^gamma) / (1 + B))^(-beta)
|
| 492 |
+
# theta: [bias, A, alpha, B, gamma, beta]
|
| 493 |
+
def sl_10(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
|
| 494 |
+
ops = utils.get_ops(backend)
|
| 495 |
+
xp = ops.xp
|
| 496 |
+
X = ops.asarray(X, atleast_2d=True)
|
| 497 |
+
theta = ops.asarray(theta, atleast_2d=True)
|
| 498 |
+
|
| 499 |
+
E = ops.clamp_min(X[:, 0], _EPS)
|
| 500 |
+
N = ops.clamp_min(X[:, 1], _EPS)
|
| 501 |
+
|
| 502 |
+
bias = theta[:, 0]
|
| 503 |
+
A = theta[:, 1]
|
| 504 |
+
alpha = theta[:, 2]
|
| 505 |
+
B = theta[:, 3]
|
| 506 |
+
gamma = theta[:, 4]
|
| 507 |
+
beta = theta[:, 5]
|
| 508 |
+
|
| 509 |
+
logE = xp.log(ops.clamp_min(E, _EPS))
|
| 510 |
+
|
| 511 |
+
N_scaled = N[None, :] / 1e9
|
| 512 |
+
N_scaled = ops.clamp_min(N_scaled, _EPS)
|
| 513 |
+
logN_scaled = xp.log(ops.clamp_min(N_scaled, _EPS))
|
| 514 |
+
|
| 515 |
+
term_N = N_scaled ** (-alpha[:, None]) # (N/1e9)^(-alpha)
|
| 516 |
+
|
| 517 |
+
E_gamma = E[None, :] ** gamma[:, None]
|
| 518 |
+
expert_num = 1.0 + B[:, None] * E_gamma # 1 + B*E^gamma
|
| 519 |
+
expert_den = ops.clamp_min(1.0 + B[:, None], _EPS) # 1 + B
|
| 520 |
+
ratio = expert_num / expert_den # (1 + B*E^g) / (1 + B)
|
| 521 |
+
ratio_safe = ops.clamp_min(ratio, _EPS)
|
| 522 |
+
term_E = ratio_safe ** (-beta[:, None]) # ratio^(-beta)
|
| 523 |
+
|
| 524 |
+
full_term = A[:, None] * term_N * term_E # A * term_N * term_E
|
| 525 |
+
pred = bias[:, None] + full_term
|
| 526 |
+
|
| 527 |
+
ones = pred * 0.0 + 1.0
|
| 528 |
+
|
| 529 |
+
# d/d(bias) = 1
|
| 530 |
+
d_bias = ones
|
| 531 |
+
# d/d(A) = term_N * term_E
|
| 532 |
+
d_A = term_N * term_E
|
| 533 |
+
# d/d(alpha) = full_term * (-log(N_scaled))
|
| 534 |
+
d_alpha = full_term * (-logN_scaled)
|
| 535 |
+
# d/d(B):
|
| 536 |
+
# ratio = (1 + B*E^g) / (1 + B)
|
| 537 |
+
# d(ratio)/dB = (E^g * (1+B) - (1+B*E^g)) / (1+B)^2
|
| 538 |
+
# = (E^g - 1) / (1+B)^2
|
| 539 |
+
# d(term_E)/dB = (-beta) * ratio^(-beta-1) * d(ratio)/dB
|
| 540 |
+
# d(full_term)/dB = A * term_N * d(term_E)/dB
|
| 541 |
+
# = full_term * (-beta) / ratio * (E^g - 1) / (1+B)^2
|
| 542 |
+
# But ratio = expert_num / expert_den, so 1/ratio = expert_den / expert_num
|
| 543 |
+
# = full_term * (-beta) * (E^g - 1) / (expert_num * (1+B))
|
| 544 |
+
# Alternatively: full_term * (-beta) * (E_gamma - 1.0) / (expert_num * expert_den)
|
| 545 |
+
# Wait let me redo: expert_den = 1+B
|
| 546 |
+
# d(ratio)/dB = (E^g - 1) / expert_den^2
|
| 547 |
+
# d(term_E)/dB = (-beta) * ratio^(-beta-1) * (E^g - 1) / expert_den^2
|
| 548 |
+
# = (-beta) * ratio^(-beta) * (1/ratio) * (E^g - 1) / expert_den^2
|
| 549 |
+
# = (-beta) * term_E * (expert_den / expert_num) * (E^g - 1) / expert_den^2
|
| 550 |
+
# = (-beta) * term_E * (E^g - 1) / (expert_num * expert_den)
|
| 551 |
+
# So: d_B = A * term_N * (-beta) * term_E * (E^g - 1) / (expert_num * expert_den)
|
| 552 |
+
# = full_term * (-beta) * (E_gamma - 1.0) / (expert_num * expert_den)
|
| 553 |
+
d_B = full_term * (-beta[:, None]) * (E_gamma - 1.0) / (ops.clamp_min(expert_num, _EPS) * expert_den)
|
| 554 |
+
|
| 555 |
+
# d/d(gamma):
|
| 556 |
+
# d(ratio)/d(gamma) = B * E^g * log(E) / (1+B)
|
| 557 |
+
# d(term_E)/d(gamma) = (-beta) * ratio^(-beta-1) * B * E^g * log(E) / expert_den
|
| 558 |
+
# = (-beta) * term_E * (1/ratio) * B * E^g * log(E) / expert_den
|
| 559 |
+
# = (-beta) * term_E * expert_den / expert_num * B * E^g * log(E) / expert_den
|
| 560 |
+
# = (-beta) * term_E * B * E^g * log(E) / expert_num
|
| 561 |
+
# d_gamma = full_term * (-beta) * B * E^g * log(E) / expert_num
|
| 562 |
+
d_gamma = full_term * (-beta[:, None]) * B[:, None] * E_gamma * logE[None, :] / ops.clamp_min(expert_num, _EPS)
|
| 563 |
+
|
| 564 |
+
# d/d(beta) = A * term_N * d(term_E)/d(beta)
|
| 565 |
+
# term_E = ratio^(-beta)
|
| 566 |
+
# d/d(beta) = ratio^(-beta) * (-log(ratio)) = term_E * (-log(ratio))
|
| 567 |
+
# d_beta = full_term * (-log(ratio))
|
| 568 |
+
log_ratio = xp.log(ops.clamp_min(ratio_safe, _EPS))
|
| 569 |
+
d_beta = full_term * (-log_ratio)
|
| 570 |
+
|
| 571 |
+
jac = ops.stack([d_bias, d_A, d_alpha, d_B, d_gamma, d_beta], axis=-1)
|
| 572 |
+
|
| 573 |
+
if pred.shape[0] == 1:
|
| 574 |
+
return pred[0], jac[0]
|
| 575 |
+
return pred, jac
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
PARAM_BOUNDS = {
|
| 579 |
+
# Dataset: E ∈ {1,2,...,512}, log(E) ∈ [0,6.24]; N ∈ [1.65e7,1.31e9]; loss ∈ [2.0,3.16]
|
| 580 |
+
# sl_1: [L_inf, B, alpha, beta] — L_inf + B / (N^alpha * E^beta)
|
| 581 |
+
# Fit: L_inf≈1.55, B≈38, alpha≈0.19, beta≈0.07
|
| 582 |
+
"sl_1": [(0, 3), (0.1, 500), (0, 1.5), (-0.5, 1.5)],
|
| 583 |
+
# sl_2: [L, K, alpha, beta, gamma] — L + K*(N^alpha * E^beta)^(-gamma)
|
| 584 |
+
# Fit: L≈1.55, K≈38, alpha≈0.88, beta≈0.32, gamma≈0.22; alpha*gamma≈0.19, beta*gamma≈0.07
|
| 585 |
+
"sl_2": [(0, 3), (0, 500), (0, 2), (0, 2), (0, 2)],
|
| 586 |
+
# sl_3: [A, alpha, B, beta, C, D] — A*N^alpha/(1+B*E^beta) + C*N^(alpha*0.6) + D
|
| 587 |
+
# Fit: A≈28, alpha≈-0.22, B≈0.12, beta≈0.64, C≈10.7, D≈1.32
|
| 588 |
+
# alpha<0 required: N^alpha decreases loss as N grows
|
| 589 |
+
"sl_3": [(0, 5e3), (-1.5, 0), (0, 20), (0, 2), (-1e4, 1e4), (-5, 5)],
|
| 590 |
+
# sl_4: [a, alpha, b, gamma, c, d] — a/(N^alpha*(1+b*E)^gamma) + c + d*(logN-0.4*log(1+E))
|
| 591 |
+
# Fit: a≈36, alpha≈0.19, b≈0.39, gamma≈0.12, c≈2.21, d≈-0.03
|
| 592 |
+
"sl_4": [(0.1, 500), (0, 1.5), (0, 20), (0, 1.5), (-5, 5), (-1, 1)],
|
| 593 |
+
# sl_5: [p0,p1,p2,p3,p4,p5] — p0 + exp(p1+p2*logE+p3*logN+p4*logE*logN) + p5*logE
|
| 594 |
+
# Fit: p0≈1.59, p1≈3.80, p2≈-0.24, p3≈-0.20, p4≈0.013, p5≈-0.07
|
| 595 |
+
# Exponent clamped to [-50,50] in model; p3*logN_max≈-0.2*21=-4.2 keeps p1<10 safe
|
| 596 |
+
"sl_5": [(0, 3), (-5, 10), (-1, 1), (-1, 1), (-0.1, 0.1), (-1, 1)],
|
| 597 |
+
# sl_6: [a, b, c, d, e, f] — a*N^(-b)*(1+c*E^(-d)) + e + f/(E*N^0.05)
|
| 598 |
+
# Fit: a≈10.8, b≈0.17, c≈2.03, d≈0.14, e≈1.49, f≈-0.41
|
| 599 |
+
"sl_6": [(0, 200), (0, 1.5), (-5, 20), (-0.5, 2), (-5, 5), (-20, 20)],
|
| 600 |
+
# sl_7: [p0,p1,p2,p3,p4,p5] — p0*E^p1*N^p2 + p3*N^p4 + p5
|
| 601 |
+
# Fit degenerate: p0≈6089, p1≈0, p2≈p4≈-0.215, p3≈-6060, p5≈1.51
|
| 602 |
+
# True behavior: (p0+p3)*N^p2 + p5 ≈ 29*N^(-0.215) + 1.5 (p1≈0 kills E-dependence)
|
| 603 |
+
"sl_7": [(-500, 500), (-1, 0.5), (-1.5, 0.5), (-500, 500), (-1.5, 0.5), (-5, 5)],
|
| 604 |
+
# sl_8: [a, b, c, d] — a*N^b * E^c + d
|
| 605 |
+
# Fit: a≈37.9, b≈-0.19, c≈-0.07, d≈1.55
|
| 606 |
+
"sl_8": [(0, 500), (-1.5, 0.5), (-1.5, 0.5), (0, 3)],
|
| 607 |
+
# sl_9: [c0, A, g, a] — c0 + A*(N*E^g)^(-a)
|
| 608 |
+
# Fit: c0≈1.55, A≈37.9, g≈0.37, a≈0.19
|
| 609 |
+
"sl_9": [(0, 3), (0, 500), (-1, 2), (0, 2)],
|
| 610 |
+
# sl_10: [bias, A, alpha, B, gamma, beta] — bias + A*(N/1e9)^(-alpha)*((1+B*E^gamma)/(1+B))^(-beta)
|
| 611 |
+
# Fit: bias≈1.59, A≈0.70, alpha≈0.20, B≈0.1 (near 0=degenerate: expert term→1), gamma≈2.62, beta≈0.03
|
| 612 |
+
"sl_10": [(0, 3), (0, 15), (0, 2), (0, 100), (0, 6), (0, 2)],
|
| 613 |
+
}
|
| 614 |
+
|
| 615 |
+
LAW_REGISTRY = {
|
| 616 |
+
"sl_1": sl_1, "sl_2": sl_2, "sl_3": sl_3, "sl_4": sl_4, "sl_5": sl_5,
|
| 617 |
+
"sl_6": sl_6, "sl_7": sl_7, "sl_8": sl_8, "sl_9": sl_9, "sl_10": sl_10,
|
| 618 |
+
}
|
| 619 |
+
PARAM_COUNTS = {
|
| 620 |
+
"sl_1": 4, "sl_2": 5, "sl_3": 6, "sl_4": 6, "sl_5": 6,
|
| 621 |
+
"sl_6": 6, "sl_7": 6, "sl_8": 4, "sl_9": 4, "sl_10": 6,
|
| 622 |
+
}
|
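A quick way to validate the hand-derived Jacobians above is a finite-difference check. The sketch below is illustrative only: it re-implements sl_1's closed form L_inf + B / (N^alpha * E^beta) in plain NumPy (an assumption made so the snippet runs without the benchmark.dataset.utils ops layer) and compares each analytic gradient column against central differences.

# Minimal sketch, assuming a standalone NumPy re-implementation of sl_1;
# this is not part of the uploaded file.
import numpy as np

def sl_1_np(theta, E, N):
    L_inf, B, alpha, beta = theta
    return L_inf + B / (N**alpha * E**beta)

def sl_1_jac_np(theta, E, N):
    L_inf, B, alpha, beta = theta
    frac = B / (N**alpha * E**beta)
    # Columns follow the theta order used above: [L_inf, B, alpha, beta].
    return np.stack([
        np.ones_like(frac),   # d/d(L_inf)
        frac / B,             # d/d(B) = 1/denom
        -frac * np.log(N),    # d/d(alpha)
        -frac * np.log(E),    # d/d(beta)
    ], axis=-1)

theta = np.array([1.55, 38.0, 0.19, 0.07])  # rough fit values from the notes above
E = np.array([1.0, 8.0, 64.0])
N = np.array([2e7, 3e8, 1.3e9])
jac = sl_1_jac_np(theta, E, N)
for i in range(len(theta)):  # central differences, one parameter at a time
    h = 1e-6 * max(abs(theta[i]), 1.0)
    tp, tm = theta.copy(), theta.copy()
    tp[i] += h
    tm[i] -= h
    fd = (sl_1_np(tp, E, N) - sl_1_np(tm, E, N)) / (2.0 * h)
    assert np.allclose(jac[:, i], fd, rtol=1e-4)
print("analytic Jacobian matches finite differences")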
moe_scaling_law/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7bee730d20bf12888a1ecb2c72977a1049728b98668cad45b4f63e4098c5f1c0
size 2375
moe_scaling_law/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fad8831bc25efb889e0581d5746697b979712ca86f3b4e8002760bde554ec7b6
size 4088
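The two parquet entries above are Git LFS pointers; the tables themselves ship through LFS. A minimal loading sketch, assuming the repo has been cloned with git-lfs and that pandas plus a parquet engine such as pyarrow is installed; the column names follow the registry.py file added later in this commit.

import pandas as pd

# Paths are the files added in this commit.
train = pd.read_parquet("moe_scaling_law/train-00000-of-00001.parquet")
test = pd.read_parquet("moe_scaling_law/test-00000-of-00001.parquet")
X_train = train[["num_experts", "dense_parameter_count"]].to_numpy()
y_train = train["loss_validation"].to_numpy()
print(X_train.shape, y_train.shape)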
parallel_scaling_law/laws.py
ADDED
@@ -0,0 +1,393 @@
"""Scaling laws for parallel (tensor/pipeline) scaling.

X columns: [num_params (N), parallel_size (S)]
"""

from typing import Literal

import benchmark.dataset.utils as utils

_EPS = 1e-12


# sl_1 (6p): c0 + cN * N^(-α) + cS * S^(-β) + cNS * N^(-α) * S^(-β)
# theta: [c0, cN, alpha, cS, beta, cNS]
def sl_1(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    N = ops.clamp_min(X[:, 0], _EPS)
    S = ops.clamp_min(X[:, 1], _EPS)
    c0, cN, alpha, cS, beta, cNS = [theta[:, i] for i in range(6)]
    Na = N[None, :] ** (-alpha[:, None])  # (B, M)
    Sb = S[None, :] ** (-beta[:, None])  # (B, M)
    NaSb = Na * Sb
    pred = c0[:, None] + cN[:, None] * Na + cS[:, None] * Sb + cNS[:, None] * NaSb

    # Jacobian: d/d[c0, cN, alpha, cS, beta, cNS]
    ones = pred * 0.0 + 1.0
    logN = xp.log(ops.clamp_min(N[None, :], _EPS))  # (1, M) or (B, M) after broadcast
    logS = xp.log(ops.clamp_min(S[None, :], _EPS))

    d_c0 = ones
    d_cN = Na
    # d(Na)/d(alpha) = d(N^(-alpha))/d(alpha) = -N^(-alpha)*log(N) = -Na*logN
    d_alpha = (cN[:, None] * Na + cNS[:, None] * NaSb) * (-logN)
    d_cS = Sb
    # d(Sb)/d(beta) = -Sb*logS
    d_beta = (cS[:, None] * Sb + cNS[:, None] * NaSb) * (-logS)
    d_cNS = NaSb
    jac = ops.stack([d_c0, d_cN, d_alpha, d_cS, d_beta, d_cNS], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_2 (5p): c0 + cN * N^(-α) + cS * S^(-β)
# theta: [c0, cN, alpha, cS, beta]
def sl_2(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    N = ops.clamp_min(X[:, 0], _EPS)
    S = ops.clamp_min(X[:, 1], _EPS)
    c0, cN, alpha, cS, beta = [theta[:, i] for i in range(5)]
    Na = N[None, :] ** (-alpha[:, None])
    Sb = S[None, :] ** (-beta[:, None])
    pred = c0[:, None] + cN[:, None] * Na + cS[:, None] * Sb

    # Jacobian: d/d[c0, cN, alpha, cS, beta]
    ones = pred * 0.0 + 1.0
    logN = xp.log(ops.clamp_min(N[None, :], _EPS))
    logS = xp.log(ops.clamp_min(S[None, :], _EPS))

    d_c0 = ones
    d_cN = Na
    d_alpha = cN[:, None] * Na * (-logN)
    d_cS = Sb
    d_beta = cS[:, None] * Sb * (-logS)
    jac = ops.stack([d_c0, d_cN, d_alpha, d_cS, d_beta], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_3 (4p): a * N^b + c / (1 + S) + d
# theta: [a, b, c, d]
def sl_3(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    N = ops.clamp_min(X[:, 0], _EPS)
    S = ops.clamp_min(X[:, 1], _EPS)
    a, b, c, d = [theta[:, i] for i in range(4)]
    Nb = N[None, :] ** b[:, None]
    inv_1pS = 1.0 / (1.0 + S[None, :])
    pred = a[:, None] * Nb + c[:, None] * inv_1pS + d[:, None]

    # Jacobian: d/d[a, b, c, d]
    ones = pred * 0.0 + 1.0
    logN = xp.log(ops.clamp_min(N[None, :], _EPS))

    d_a = Nb
    d_b = a[:, None] * Nb * logN
    d_c = inv_1pS + ones * 0.0  # broadcast
    d_d = ones
    jac = ops.stack([d_a, d_b, d_c, d_d], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_4 (4p): a * N^b + c * S^(-0.5) + d (fixed beta=0.5)
# theta: [a, b, c, d]
def sl_4(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    N = ops.clamp_min(X[:, 0], _EPS)
    S = ops.clamp_min(X[:, 1], _EPS)
    a, b, c, d = [theta[:, i] for i in range(4)]
    Nb = N[None, :] ** b[:, None]
    S_inv_half = S[None, :] ** (-0.5)
    pred = a[:, None] * Nb + c[:, None] * S_inv_half + d[:, None]

    # Jacobian: d/d[a, b, c, d]
    ones = pred * 0.0 + 1.0
    logN = xp.log(ops.clamp_min(N[None, :], _EPS))

    d_a = Nb
    d_b = a[:, None] * Nb * logN
    d_c = S_inv_half + ones * 0.0  # broadcast
    d_d = ones
    jac = ops.stack([d_a, d_b, d_c, d_d], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_5 (4p): (A / (N * (k * log(S) + 1)))^α + E
# theta: [A, k, alpha, E]
def sl_5(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    N = ops.clamp_min(X[:, 0], _EPS)
    S = ops.clamp_min(X[:, 1], 1.0)
    A, k, alpha, E = [theta[:, i] for i in range(4)]
    logS = xp.log(S[None, :])
    denom = N[None, :] * (k[:, None] * logS + 1.0)
    denom = ops.clamp_min(denom, _EPS)
    base = A[:, None] / denom
    base = ops.clamp_min(base, _EPS)
    power = base ** alpha[:, None]  # base^alpha
    pred = power + E[:, None]

    # Jacobian: d/d[A, k, alpha, E]
    # Let f = base^alpha, pred = f + E
    # base = A / denom, denom = N*(k*logS + 1)
    ones = pred * 0.0 + 1.0
    log_base = xp.log(ops.clamp_min(base, _EPS))

    # d(pred)/dA: d(base^alpha)/dA = alpha * base^(alpha-1) / denom = alpha * power / A
    d_A = alpha[:, None] * power / ops.clamp_min(A[:, None], _EPS)

    # d(pred)/dk: d(log(base))/dk = -d(log(denom))/dk, and d(denom)/dk = N*logS,
    # so d(log(denom))/dk = N*logS/denom = logS/(k*logS + 1); hence
    # d(f)/dk = f * alpha * d(log(base))/dk = -power * alpha * logS / (k*logS + 1)
    k_logS_1 = ops.clamp_min(k[:, None] * logS + 1.0, _EPS)
    d_k = -power * alpha[:, None] * logS / k_logS_1

    # d(pred)/dalpha: d(base^alpha)/dalpha = base^alpha * log(base) = power * log(base)
    d_alpha = power * log_base

    d_E = ones
    jac = ops.stack([d_A, d_k, d_alpha, d_E], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_6 (4p): c0 + c1 * (N^(-α) + S^(-β))
# theta: [c0, c1, alpha, beta]
def sl_6(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    N = ops.clamp_min(X[:, 0], _EPS)
    S = ops.clamp_min(X[:, 1], _EPS)
    c0, c1, alpha, beta = [theta[:, i] for i in range(4)]
    Na = N[None, :] ** (-alpha[:, None])
    Sb = S[None, :] ** (-beta[:, None])
    sumNS = Na + Sb
    pred = c0[:, None] + c1[:, None] * sumNS

    # Jacobian: d/d[c0, c1, alpha, beta]
    ones = pred * 0.0 + 1.0
    logN = xp.log(ops.clamp_min(N[None, :], _EPS))
    logS = xp.log(ops.clamp_min(S[None, :], _EPS))

    d_c0 = ones
    d_c1 = sumNS
    d_alpha = c1[:, None] * Na * (-logN)
    d_beta = c1[:, None] * Sb * (-logS)
    jac = ops.stack([d_c0, d_c1, d_alpha, d_beta], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_7 (4p): L0 + A * N^(-α) / (1 + k * ln(S))
# theta: [L0, A, alpha, k]
def sl_7(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    N = ops.clamp_min(X[:, 0], _EPS)
    S = ops.clamp_min(X[:, 1], 1.0)
    L0, A, alpha, k = [theta[:, i] for i in range(4)]
    Na = N[None, :] ** (-alpha[:, None])
    logS = xp.log(S[None, :])
    denom = 1.0 + k[:, None] * logS
    denom = ops.clamp_min(denom, _EPS)
    numer = A[:, None] * Na
    frac = numer / denom
    pred = L0[:, None] + frac

    # Jacobian: d/d[L0, A, alpha, k]
    ones = pred * 0.0 + 1.0
    logN = xp.log(ops.clamp_min(N[None, :], _EPS))

    d_L0 = ones
    d_A = Na / denom
    # d(frac)/d(alpha) = A * d(Na)/d(alpha) / denom = A * (-Na*logN) / denom = -frac * logN
    d_alpha = -frac * logN
    # d(frac)/dk = -numer * logS / denom^2 = -frac * logS / denom
    d_k = -frac * logS / denom
    jac = ops.stack([d_L0, d_A, d_alpha, d_k], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_8 (4p): (a * N^b + c) / (1 + d * log(S))
# theta: [a, b, c, d]
def sl_8(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    N = ops.clamp_min(X[:, 0], _EPS)
    S = ops.clamp_min(X[:, 1], 1.0)
    a, b, c, d = [theta[:, i] for i in range(4)]
    Nb = N[None, :] ** b[:, None]
    logS = xp.log(S[None, :])
    numer = a[:, None] * Nb + c[:, None]
    denom = 1.0 + d[:, None] * logS
    denom = ops.clamp_min(denom, _EPS)
    pred = numer / denom

    # Jacobian: d/d[a, b, c, d]
    logN = xp.log(ops.clamp_min(N[None, :], _EPS))
    inv_denom = 1.0 / denom

    d_a = Nb * inv_denom  # d(numer)/da = N^b
    d_b = a[:, None] * Nb * logN * inv_denom  # d(numer)/db = a * N^b * logN
    d_c = inv_denom  # d(numer)/dc = 1
    d_d = -numer * logS * inv_denom ** 2  # d(denom)/dd = logS → quotient rule
    jac = ops.stack([d_a, d_b, d_c, d_d], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_9 (3p): A * N^(-α) * S^(-β)
# theta: [A, alpha, beta]
def sl_9(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    N = ops.clamp_min(X[:, 0], _EPS)
    S = ops.clamp_min(X[:, 1], _EPS)
    A, alpha, beta = [theta[:, i] for i in range(3)]
    Na = N[None, :] ** (-alpha[:, None])
    Sb = S[None, :] ** (-beta[:, None])
    pred = A[:, None] * Na * Sb

    # Jacobian: d/d[A, alpha, beta]
    logN = xp.log(ops.clamp_min(N[None, :], _EPS))
    logS = xp.log(ops.clamp_min(S[None, :], _EPS))

    d_A = Na * Sb  # pred / A
    d_alpha = pred * (-logN)  # d(N^(-alpha))/dalpha = -N^(-alpha)*logN
    d_beta = pred * (-logS)
    jac = ops.stack([d_A, d_alpha, d_beta], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_10 (4p): (A * N^(-α) + E) * S^(-β)
# theta: [A, alpha, E, beta]
def sl_10(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    N = ops.clamp_min(X[:, 0], _EPS)
    S = ops.clamp_min(X[:, 1], _EPS)
    A, alpha, E, beta = [theta[:, i] for i in range(4)]
    Na = N[None, :] ** (-alpha[:, None])
    Sb = S[None, :] ** (-beta[:, None])
    bracket = A[:, None] * Na + E[:, None]  # (A*N^(-alpha) + E)
    pred = bracket * Sb

    # Jacobian: d/d[A, alpha, E, beta]
    logN = xp.log(ops.clamp_min(N[None, :], _EPS))
    logS = xp.log(ops.clamp_min(S[None, :], _EPS))

    d_A = Na * Sb
    # d(bracket)/dalpha = A * (-Na * logN), then * Sb
    d_alpha = A[:, None] * Na * (-logN) * Sb
    d_E = Sb
    # d(pred)/dbeta = bracket * d(Sb)/dbeta = bracket * (-Sb * logS) = -pred * logS
    d_beta = -pred * logS
    jac = ops.stack([d_A, d_alpha, d_E, d_beta], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


PARAM_BOUNDS = {
    # Dataset: N ∈ [5.4e8, 4.4e9], log(N) ∈ [20.1, 22.2]; S ∈ {1,2,4,8}; loss ∈ [0.98, 2.11]
    # Groups (pile/stack) have different loss floors; fit per group.
    # sl_1: [c0, cN, alpha, cS, beta, cNS] — c0+cN*N^(-alpha)+cS*S^(-beta)+cNS*N^(-alpha)*S^(-beta)
    # Fit pile: [1.38, 112.7, 0.260, 0.097, 0.399, 5.69]; stack: [0.763, 66.0, 0.263, 0.049, 0.470, 5.39]
    "sl_1": [(-5, 5), (0, 500), (0, 2), (-50, 50), (0, 2), (-100, 100)],
    # sl_2: [c0, cN, alpha, cS, beta] — c0 + cN*N^(-alpha) + cS*S^(-beta)
    # Fit pile: [1.364, 117.8, 0.261, 0.121, 0.399]; stack: [0.750, 70.7, 0.264, 0.070, 0.472]
    "sl_2": [(-5, 5), (0, 500), (0, 2), (-50, 50), (0, 2)],
    # sl_3: [a, b, c, d] — a*N^b + c/(1+S) + d; b<0 required (more params → lower loss)
    # Fit pile: [117.9, -0.261, 0.174, 1.398]; stack: [70.7, -0.264, 0.112, 0.764]
    "sl_3": [(0, 500), (-2, 0), (-5, 5), (-5, 5)],
    # sl_4: [a, b, c, d] — a*N^b + c*S^(-0.5) + d; b<0 required
    # Fit pile: [117.9, -0.261, 0.105, 1.381]; stack: [70.7, -0.264, 0.068, 0.753]
    "sl_4": [(0, 500), (-2, 0), (-5, 5), (-5, 5)],
    # sl_5: [A, k, alpha, E] — (A/(N*(k*log(S)+1)))^alpha + E
    # A must be ~N*constant: A/N_min ∈ [0.02, 1.85] for physical predictions
    # Fit pile: [1.95e8, 0.334, 0.196, 1.291]; stack: [1.14e7, 0.396, 0.199, 0.709]
    "sl_5": [(0, 1e9), (-100, 100), (0, 3), (-5, 5)],
    # sl_6: [c0, c1, alpha, beta] — c0 + c1*(N^(-alpha) + S^(-beta))
    # Fit pile: [-115.8, 117.3, 0.261, ~0]; stack: [-69.4, 70.2, 0.264, ~0]
    # c0 hits -115 for pile (old -100 bound was too tight); beta≈0 (structural degeneracy)
    "sl_6": [(-200, 200), (-200, 200), (0, 2), (0, 2)],
    # sl_7: [L0, A, alpha, k] — L0 + A*N^(-alpha)/(1+k*ln(S))
    # Fit pile: [1.315, 48.1, 0.204, 0.055]; stack: [0.728, 30.6, 0.211, 0.065]
    "sl_7": [(-5, 5), (0, 500), (0, 2), (-50, 50)],
    # sl_8: [a, b, c, d] — (a*N^b + c)/(1+d*log(S)); b<0 required
    # Fit pile: [118.1, -0.260, 1.472, 0.017]; stack: [71.0, -0.263, 0.812, 0.020]
    "sl_8": [(0, 500), (-2, 0), (-5, 5), (-10, 10)],
    # sl_9: [A, alpha, beta] — A*N^(-alpha)*S^(-beta)
    # Fit pile: [7.73, 0.065, 0.017]; stack: [4.45, 0.067, 0.020] (small exponents)
    "sl_9": [(0, 5000), (0, 2), (0, 2)],
    # sl_10: [A, alpha, E, beta] — (A*N^(-alpha) + E)*S^(-beta)
    # Fit pile: [118.1, 0.260, 1.471, 0.017]; stack: [71.0, 0.263, 0.812, 0.020]
    "sl_10": [(0, 500), (0, 2), (-5, 5), (0, 2)],
}

LAW_REGISTRY = {
    "sl_1": sl_1, "sl_2": sl_2, "sl_3": sl_3, "sl_4": sl_4, "sl_5": sl_5,
    "sl_6": sl_6, "sl_7": sl_7, "sl_8": sl_8, "sl_9": sl_9, "sl_10": sl_10,
}
PARAM_COUNTS = {
    "sl_1": 6, "sl_2": 5, "sl_3": 4, "sl_4": 4, "sl_5": 4,
    "sl_6": 4, "sl_7": 4, "sl_8": 4, "sl_9": 3, "sl_10": 4,
}
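Because every law here returns both predictions and an analytic Jacobian, the laws drop directly into a derivative-based least-squares solver. The sketch below is a minimal illustration, assuming a standalone NumPy version of sl_9 (L = A * N^(-alpha) * S^(-beta)) so it runs without the benchmark package; the "true" parameters reuse the pile-group fit quoted in the PARAM_BOUNDS notes.

# Minimal fitting sketch on synthetic data (assumption: standalone sl_9 clone).
import numpy as np
from scipy.optimize import least_squares

rng = np.random.default_rng(0)
N = rng.uniform(5.4e8, 4.4e9, size=40)
S = rng.choice([1.0, 2.0, 4.0, 8.0], size=40)
true = np.array([7.73, 0.065, 0.017])  # pile-group fit from the notes above
y = true[0] * N**(-true[1]) * S**(-true[2]) + rng.normal(0.0, 1e-3, size=40)

def residuals(theta):
    A, alpha, beta = theta
    return A * N**(-alpha) * S**(-beta) - y

def jac(theta):
    A, alpha, beta = theta
    base = N**(-alpha) * S**(-beta)
    # Columns in theta order [A, alpha, beta], matching sl_9's stacked Jacobian.
    return np.stack([base, -A * base * np.log(N), -A * base * np.log(S)], axis=-1)

fit = least_squares(residuals, x0=[5.0, 0.1, 0.1], jac=jac,
                    bounds=([0.0, 0.0, 0.0], [5000.0, 2.0, 2.0]))
print(fit.x)  # should land close to `true`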
parallel_scaling_law/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:16f73df39117534390df840862fba13e4ce032b08c24b31b0406258987d6613c
size 2117
parallel_scaling_law/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3bf76fabd8f7f5f1b90543b9cb9ef83db888c5674bdc9c4dc23277bd8a92f7b7
size 2434
registry.py
ADDED
@@ -0,0 +1,176 @@
"""Dataset registry: metadata, feature/target columns, and cost functions."""

from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional

import numpy as np


@dataclass
class DatasetInfo:
    name: str
    feature_cols: List[str]
    target_cols: List[str]
    group_col: str = "group"
    cost_fn: Optional[Callable[[dict], float]] = None
    # Extra columns needed for cost but not used as features
    cost_extra_cols: List[str] = field(default_factory=list)
    # Per-dataset budget checkpoints (overrides runner default if set)
    budget_checkpoints: Optional[List[float]] = None


def _cost_data_constrained(row: dict) -> float:
    return 6.0 * row["params"] * row["tokens"]


def _cost_parallel(row: dict) -> float:
    return float(row["num_params"])


def _cost_moe(row: dict) -> float:
    return float(row["dense_parameter_count"]) * float(row["num_experts"])


def _cost_easy_question(row: dict) -> float:
    return float(row["flops"])


def _cost_vocab(row: dict) -> float:
    return float(row["non_vocab_parameters"]) * float(row["num_characters"])


def _cost_lr_bsz(row: dict) -> float:
    return 6.0 * float(row["non_embedding_param_size"]) * float(row["data_size"])


def _cost_domain_mixture(row: dict) -> float:
    return 1.0


def _cost_chinchilla(row: dict) -> float:
    return 6.0 * float(row["N"]) * float(row["D"])


def _cost_farseer(row: dict) -> float:
    return 6.0 * float(row["N"]) * float(row["D"])


def _cost_sft(row: dict) -> float:
    return float(row["sft_data_size"])


def _cost_sae(row: dict) -> float:
    return float(row["n"]) ** 1.6


def _cost_distillation(row: dict) -> float:
    return 6.0 * float(row["NS"]) * float(row["DS"])


def _cost_sparsity(row: dict) -> float:
    return 6.0 * row["N_dense"] * row["D1"] + 6.0 * row["N_active"] * row["D2"]


DATASET_REGISTRY: Dict[str, DatasetInfo] = {
    "data_constrained_scaling_law": DatasetInfo(
        name="data_constrained_scaling_law",
        feature_cols=["unique_tokens", "params", "tokens"],
        target_cols=["loss"],
        cost_fn=_cost_data_constrained,
    ),
    "parallel_scaling_law": DatasetInfo(
        name="parallel_scaling_law",
        feature_cols=["num_params", "parallel_size"],
        target_cols=["loss"],
        cost_fn=_cost_parallel,
    ),
    "moe_scaling_law": DatasetInfo(
        name="moe_scaling_law",
        feature_cols=["num_experts", "dense_parameter_count"],
        target_cols=["loss_validation"],
        cost_fn=_cost_moe,
    ),
    "easy_question_scaling_law": DatasetInfo(
        name="easy_question_scaling_law",
        feature_cols=["log_flops"],
        target_cols=["brier_score"],
        cost_fn=_cost_easy_question,
        cost_extra_cols=["flops"],
        budget_checkpoints=[0.01, 0.05, 0.1],
    ),
    "vocab_scaling_law": DatasetInfo(
        name="vocab_scaling_law",
        feature_cols=["non_vocab_parameters", "vocab_size", "num_characters"],
        target_cols=["unigram_normalized_loss"],
        cost_fn=_cost_vocab,
    ),
    "lr_bsz_scaling_law": DatasetInfo(
        name="lr_bsz_scaling_law",
        feature_cols=["lr", "bsz", "data_size", "non_embedding_param_size"],
        target_cols=["lm_loss"],
        cost_fn=_cost_lr_bsz,
    ),
    "domain_mixture_scaling_law": DatasetInfo(
        name="domain_mixture_scaling_law",
        feature_cols=[
            "proportion_domain_1",
            "proportion_domain_2",
            "proportion_domain_3",
            "proportion_domain_4",
            "proportion_domain_5",
        ],
        target_cols=[
            "loss_domain_1",
            "loss_domain_2",
            "loss_domain_3",
            "loss_domain_4",
            "loss_domain_5",
        ],
        cost_fn=_cost_domain_mixture,
        budget_checkpoints=[0.2, 0.35, 0.5],
    ),
    "chinchilla_scaling_law": DatasetInfo(
        name="chinchilla_scaling_law",
        feature_cols=["N", "D"],
        target_cols=["loss"],
        cost_fn=_cost_chinchilla,
    ),
    "farseer_scaling_law": DatasetInfo(
        name="farseer_scaling_law",
        feature_cols=["N", "D"],
        target_cols=["loss"],
        cost_fn=_cost_farseer,
    ),
    "sft_scaling_law": DatasetInfo(
        name="sft_scaling_law",
        feature_cols=["sft_data_size"],
        target_cols=["sft_loss"],
        cost_fn=_cost_sft,
    ),
    "sae_scaling_law": DatasetInfo(
        name="sae_scaling_law",
        feature_cols=["n", "k"],
        target_cols=["loss"],
        cost_fn=_cost_sae,
    ),
    "distillation_scaling_law": DatasetInfo(
        name="distillation_scaling_law",
        feature_cols=["NS", "DS", "LT"],
        target_cols=["LS"],
        cost_fn=_cost_distillation,
    ),
    "sparsity_scaling_law": DatasetInfo(
        name="sparsity_scaling_law",
        feature_cols=["P", "N_active"],
        target_cols=["loss"],
        cost_fn=_cost_sparsity,
        cost_extra_cols=["N_dense", "D1", "D2"],
        budget_checkpoints=[0.2, 0.35, 0.5],
    ),
}


def get_dataset_info(name: str) -> DatasetInfo:
    if name not in DATASET_REGISTRY:
        raise KeyError(f"Unknown dataset: {name}. Available: {list(DATASET_REGISTRY)}")
    return DATASET_REGISTRY[name]
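A minimal usage sketch for the registry, assuming this module is importable as registry; the rows below are made up for illustration and are not real ScaleBench data.

from registry import get_dataset_info

info = get_dataset_info("moe_scaling_law")
print(info.feature_cols, info.target_cols)
# ['num_experts', 'dense_parameter_count'] ['loss_validation']

# cost_fn maps a raw row dict to a scalar training cost (dense params * experts here),
# which a runner can accumulate; the budget_checkpoints values look like fractions of total cost.
rows = [
    {"num_experts": 8, "dense_parameter_count": 1.0e8},
    {"num_experts": 64, "dense_parameter_count": 5.0e8},
]
costs = [info.cost_fn(r) for r in rows]
total = sum(costs)
print([c / total for c in costs])  # each run's share of the pooled budget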
sparsity_scaling_law/__init__.py
ADDED
File without changes
sparsity_scaling_law/laws.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Scaling laws for MoE sparsity models.

X columns: [P (= N_total / N_active), N_active]

sl_1 (5 params): L = exp(d1) * P^(-a) * N_active^(-b) * exp(c * log(P) * log(N_active)) + exp(d3)
sl_2 (4 params): L = exp(d1) * P^(-a) * N_active^(-b) + exp(d3)
sl_3 (6 params): L = exp(d1) * P^(-a) + exp(d2) * N_active^(-b) * exp(c * log(P) * log(N_active)) + exp(d3)
sl_4 (5 params): L = exp(d1) * P^(-a) + exp(d2) * N_active^(-b) + exp(d3)
"""

from typing import Literal

import benchmark.dataset.utils as utils

_EPS = 1e-12


# sl_1 (5 params): [d1, a, b, c, d3]
# L = exp(d1) * P^(-a) * N_active^(-b) * exp(c * log(P) * log(N_active)) + exp(d3)
#   = exp(d1 - a*log(P) - b*log(N) + c*log(P)*log(N)) + exp(d3)
# Let term = exp(d1 - a*log_P - b*log_N + c*log_P*log_N)
# Let base = exp(d3)
def sl_1(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    P = ops.clamp_min(X[:, 0], _EPS)
    N_act = ops.clamp_min(X[:, 1], _EPS)

    d1 = theta[:, 0]
    a = theta[:, 1]
    b = theta[:, 2]
    c = theta[:, 3]
    d3 = theta[:, 4]

    log_P = xp.log(P)[None, :]  # (1, M)
    log_N = xp.log(N_act)[None, :]  # (1, M)

    term = (
        ops.exp(d1[:, None])
        * (P[None, :] ** (-a[:, None]))
        * (N_act[None, :] ** (-b[:, None]))
        * ops.exp(c[:, None] * log_P * log_N)
    )  # (B, M)
    base = ops.exp(d3[:, None])  # (B, 1), broadcasts against term
    pred = term + base  # (B, M)

    # Jacobian: (B, M, 5)
    # term = exp(d1 - a*log_P - b*log_N + c*log_P*log_N)
    # d(term)/d(d1) = term
    # d(term)/d(a) = -term * log_P
    # d(term)/d(b) = -term * log_N
    # d(term)/d(c) = term * log_P * log_N
    # d(base)/d(d3) = base
    zeros = pred * 0.0

    d_d1 = term
    d_a = -term * log_P
    d_b = -term * log_N
    d_c = term * log_P * log_N
    d_d3 = zeros + base  # broadcast base up to (B, M)

    jac = ops.stack([d_d1, d_a, d_b, d_c, d_d3], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_2 (4 params): [d1, a, b, d3]
# L = exp(d1) * P^(-a) * N_active^(-b) + exp(d3)
#   = exp(d1 - a*log_P - b*log_N) + exp(d3)
def sl_2(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    P = ops.clamp_min(X[:, 0], _EPS)
    N_act = ops.clamp_min(X[:, 1], _EPS)

    d1 = theta[:, 0]
    a = theta[:, 1]
    b = theta[:, 2]
    d3 = theta[:, 3]

    log_P = xp.log(P)[None, :]  # (1, M)
    log_N = xp.log(N_act)[None, :]  # (1, M)

    term = (
        ops.exp(d1[:, None])
        * (P[None, :] ** (-a[:, None]))
        * (N_act[None, :] ** (-b[:, None]))
    )  # (B, M)
    base = ops.exp(d3[:, None])  # (B, 1), broadcasts against term
    pred = term + base  # (B, M)

    # Jacobian: (B, M, 4)
    # term = exp(d1 - a*log_P - b*log_N)
    zeros = pred * 0.0

    d_d1 = term
    d_a = -term * log_P
    d_b = -term * log_N
    d_d3 = zeros + base

    jac = ops.stack([d_d1, d_a, d_b, d_d3], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_3 (6 params): [d1, a, d2, b, c, d3]
# L = exp(d1) * P^(-a) + exp(d2) * N_active^(-b) * exp(c * log(P) * log(N_active)) + exp(d3)
def sl_3(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    P = ops.clamp_min(X[:, 0], _EPS)
    N_act = ops.clamp_min(X[:, 1], _EPS)

    d1 = theta[:, 0]
    a = theta[:, 1]
    d2 = theta[:, 2]
    b = theta[:, 3]
    c = theta[:, 4]
    d3 = theta[:, 5]

    log_P = xp.log(P)[None, :]  # (1, M)
    log_N = xp.log(N_act)[None, :]  # (1, M)

    term_P = ops.exp(d1[:, None]) * (P[None, :] ** (-a[:, None]))  # (B, M)
    term_N = (
        ops.exp(d2[:, None])
        * (N_act[None, :] ** (-b[:, None]))
        * ops.exp(c[:, None] * log_P * log_N)
    )  # (B, M)
    base = ops.exp(d3[:, None])  # (B, 1), broadcasts against the terms
    pred = term_P + term_N + base  # (B, M)

    # Jacobian: (B, M, 6)
    # term_P = exp(d1 - a*log_P) => d/d(d1) = term_P, d/d(a) = -term_P*log_P
    # term_N = exp(d2 - b*log_N + c*log_P*log_N) => d/d(d2)=term_N, d/d(b)=-term_N*log_N, d/d(c)=term_N*log_P*log_N
    # base = exp(d3) => d/d(d3) = base
    zeros = pred * 0.0

    d_d1 = zeros + term_P
    d_a = -term_P * log_P
    d_d2 = zeros + term_N
    d_b = -term_N * log_N
    d_c = term_N * log_P * log_N
    d_d3 = zeros + base

    jac = ops.stack([d_d1, d_a, d_d2, d_b, d_c, d_d3], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_4 (5 params): [d1, a, d2, b, d3]
# L = exp(d1) * P^(-a) + exp(d2) * N_active^(-b) + exp(d3)
def sl_4(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    P = ops.clamp_min(X[:, 0], _EPS)
    N_act = ops.clamp_min(X[:, 1], _EPS)

    d1 = theta[:, 0]
    a = theta[:, 1]
    d2 = theta[:, 2]
    b = theta[:, 3]
    d3 = theta[:, 4]

    log_P = xp.log(P)[None, :]  # (1, M)
    log_N = xp.log(N_act)[None, :]  # (1, M)

    term_P = ops.exp(d1[:, None]) * (P[None, :] ** (-a[:, None]))  # (B, M)
    term_N = ops.exp(d2[:, None]) * (N_act[None, :] ** (-b[:, None]))  # (B, M)
    base = ops.exp(d3[:, None])  # (B, 1), broadcasts against the terms
    pred = term_P + term_N + base  # (B, M)

    # Jacobian: (B, M, 5)
    zeros = pred * 0.0

    d_d1 = zeros + term_P
    d_a = -term_P * log_P
    d_d2 = zeros + term_N
    d_b = -term_N * log_N
    d_d3 = zeros + base

    jac = ops.stack([d_d1, d_a, d_d2, d_b, d_d3], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


LAW_REGISTRY = {"sl_1": sl_1, "sl_2": sl_2, "sl_3": sl_3, "sl_4": sl_4}
PARAM_COUNTS = {"sl_1": 5, "sl_2": 4, "sl_3": 6, "sl_4": 5}

# Data ranges:
# P in [1.86, 12.25], N_active in [0.015, 1.90], loss in [2.07, 3.40]
# d1, d2, d3: inside exp(), so a reasonable range is (-5, 5)
# a, b: positive exponents, (0.01, 3.0)
# c: cross-term coefficient, (-1, 1)
PARAM_BOUNDS = {
    "sl_1": [(-5.0, 5.0), (0.01, 3.0), (0.01, 3.0), (-1.0, 1.0), (-5.0, 5.0)],
    "sl_2": [(-5.0, 5.0), (0.01, 3.0), (0.01, 3.0), (-5.0, 5.0)],
    "sl_3": [(-5.0, 5.0), (0.01, 3.0), (-5.0, 5.0), (0.01, 3.0), (-1.0, 1.0), (-5.0, 5.0)],
    "sl_4": [(-5.0, 5.0), (0.01, 3.0), (-5.0, 5.0), (0.01, 3.0), (-5.0, 5.0)],
}
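Each law returns `(pred, jac)` with an analytic Jacobian, so a central finite-difference check is the natural sanity test. A minimal numpy-only sketch: it re-implements the sl_1 formula from the docstring directly rather than importing the module, so no package path is assumed.

```python
import numpy as np

def sl_1_np(theta, P, N_act):
    # L = exp(d1 - a*log(P) - b*log(N) + c*log(P)*log(N)) + exp(d3)
    d1, a, b, c, d3 = theta
    return np.exp(d1 - a * np.log(P) - b * np.log(N_act)
                  + c * np.log(P) * np.log(N_act)) + np.exp(d3)

theta = np.array([0.5, 0.4, 0.3, 0.05, 0.2])
P = np.array([2.0, 4.0, 8.0])
N_act = np.array([0.1, 0.5, 1.5])

# Analytic Jacobian, mirroring the derivatives in laws.py above
term = sl_1_np(theta, P, N_act) - np.exp(theta[4])
lP, lN = np.log(P), np.log(N_act)
jac = np.stack([term, -term * lP, -term * lN, term * lP * lN,
                np.full_like(P, np.exp(theta[4]))], axis=-1)

# Central-difference check of every column
eps = 1e-6
for k in range(5):
    e = np.zeros(5); e[k] = eps
    fd = (sl_1_np(theta + e, P, N_act) - sl_1_np(theta - e, P, N_act)) / (2 * eps)
    assert np.allclose(fd, jac[:, k], rtol=1e-5, atol=1e-8)
```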
sparsity_scaling_law/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:240f1b6315deee2167a84d1a29ebac1b349e0e0c40f41511e36ec6a42bb387c6
size 2441
sparsity_scaling_law/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a91b91ba7826bf262770ed6e7ab363274810819856e99981b5ad579576796127
size 3279
utils.py
ADDED
@@ -0,0 +1,71 @@
from dataclasses import dataclass
from typing import Literal

import numpy as np

Backend = Literal["numpy", "jax", "torch"]


@dataclass
class BackendOps:
    backend: Backend
    xp: object

    def asarray(self, x, atleast_2d: bool = False):
        if self.backend == "torch":
            if isinstance(x, self.xp.Tensor):
                arr = x.to(dtype=self.xp.float64)
            else:
                arr = self.xp.as_tensor(x, dtype=self.xp.float64)
            if atleast_2d and arr.ndim < 2:
                if arr.ndim == 1:
                    arr = arr.unsqueeze(0)
                else:
                    arr = arr.reshape(1, 1)
            return arr

        arr = self.xp.asarray(x, dtype=self.xp.float64)
        if atleast_2d:
            arr = self.xp.atleast_2d(arr)
        return arr

    def maximum(self, x, y):
        return self.xp.maximum(x, y)

    def minimum(self, x, y):
        return self.xp.minimum(x, y)

    def clamp(self, x, min=None, max=None):
        if self.backend == "torch":
            return self.xp.clamp(x, min=min, max=max)
        if min is not None:
            x = self.xp.maximum(x, min)
        if max is not None:
            x = self.xp.minimum(x, max)
        return x

    def clamp_min(self, x, min_value):
        return self.maximum(x, min_value)

    def clamp_max(self, x, max_value):
        return self.minimum(x, max_value)

    def exp(self, x):
        return self.xp.exp(x)

    def stack(self, arrays, axis=-1):
        if self.backend == "torch":
            return self.xp.stack(arrays, dim=axis)
        return self.xp.stack(arrays, axis=axis)


def get_ops(backend: Backend) -> BackendOps:
    if backend == "numpy":
        xp = np
    elif backend == "jax":
        import jax.numpy as jnp
        xp = jnp
    elif backend == "torch":
        import torch
        xp = torch
    else:
        raise ValueError(f"Unsupported backend: {backend}")

    return BackendOps(backend=backend, xp=xp)
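All laws route their array operations through `BackendOps`, which is what lets one implementation serve numpy, jax, and torch. A small usage sketch with the numpy backend; the `benchmark.dataset.utils` import path is the one used by the `laws.py` files above, so adjust it to your checkout layout.

```python
import benchmark.dataset.utils as utils  # path assumed from the laws.py imports above

ops = utils.get_ops("numpy")  # or "jax" / "torch", if installed
x = ops.asarray([1.0, 2.0, 3.0], atleast_2d=True)  # float64, shape (1, 3)
x = ops.clamp_min(x, 1e-12)   # elementwise lower bound, backend-agnostic
y = ops.exp(x)
stacked = ops.stack([x, y], axis=-1)  # torch wants dim=, not axis=; BackendOps hides that
print(stacked.shape)          # (1, 3, 2)
```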
vocab_scaling_law/laws.py
ADDED
@@ -0,0 +1,630 @@
"""Scaling laws for vocabulary size scaling.

X columns: [non_vocab_parameters (P), vocab_size (V), num_characters (D)]
"""

from typing import Literal

import benchmark.dataset.utils as utils

_EPS = 1e-6
_LOG_CLIP = 50.0  # bound for exp() arguments; keeps every power law finite


# sl_1 (5p): c0 + A * V^b * P^e * D^g
# theta: [c0, A, b, e, g]
def sl_1(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    P = ops.clamp_min(X[:, 0], _EPS)
    V = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    c0, A, b, e, g = [theta[:, i] for i in range(5)]

    logP = xp.log(P)
    logV = xp.log(V)
    logD = xp.log(D)

    V_b = V[None, :] ** b[:, None]
    P_e = P[None, :] ** e[:, None]
    D_g = D[None, :] ** g[:, None]
    term = A[:, None] * V_b * P_e * D_g  # A * V^b * P^e * D^g

    pred = c0[:, None] + term

    ones = pred * 0.0 + 1.0

    # d/d(c0) = 1
    d_c0 = ones
    # d/d(A) = V^b * P^e * D^g
    d_A = V_b * P_e * D_g
    # d/d(b) = term * log(V)
    d_b = term * logV[None, :]
    # d/d(e) = term * log(P)
    d_e = term * logP[None, :]
    # d/d(g) = term * log(D)
    d_g = term * logD[None, :]

    jac = ops.stack([d_c0, d_A, d_b, d_e, d_g], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_2 (7p): L + A * M_r(P^-alpha, D^-beta) * (1 + C * (log(V) - v0)^2)
# Generalized power mean with quadratic vocab gate
# M_r(x,y) = ((x^r + y^r)/2)^(1/r)
# theta: [L, A, alpha, beta, C, v0, r]
def sl_2(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    P = ops.clamp_min(X[:, 0], _EPS)
    V = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    L, A, alpha, beta, C, v0, r = [theta[:, i] for i in range(7)]

    logP = xp.log(P)
    logD = xp.log(D)
    logV = xp.log(V)

    r_safe = ops.clamp_min(r, _EPS)
    inv_r = 1.0 / r_safe

    # ── All heavy lifting in log-space ──────────────────────────
    # log(tp_r) = -alpha * r * log(P), log(td_r) = -beta * r * log(D)
    log_tp_r = -alpha[:, None] * r_safe[:, None] * logP[None, :]
    log_td_r = -beta[:, None] * r_safe[:, None] * logD[None, :]

    # log(S) = log((tp_r + td_r)/2) via logsumexp
    log_max = xp.maximum(log_tp_r, log_td_r)
    log_sum_raw = log_max + xp.log(
        xp.exp(log_tp_r - log_max) + xp.exp(log_td_r - log_max)
    )
    log_S = log_sum_raw - xp.log(2.0)  # = log((tp_r+td_r)/2)

    # mean_r = S^(1/r) = exp(log_S / r), clipped to prevent overflow
    log_mean_r = xp.clip(log_S * inv_r[:, None], -_LOG_CLIP, _LOG_CLIP)
    mean_r = xp.exp(log_mean_r)

    # ── Stable ratio via sigmoid ────────────────────────────────
    # tp_r / (tp_r + td_r) = sigmoid(log_tp_r - log_td_r)
    # td_r / (tp_r + td_r) = sigmoid(log_td_r - log_tp_r)
    diff = log_tp_r - log_td_r
    diff_clip = xp.clip(diff, -_LOG_CLIP, _LOG_CLIP)
    sig_p = 1.0 / (1.0 + xp.exp(-diff_clip))  # = tp_r/(tp_r+td_r)
    sig_d = 1.0 - sig_p  # = td_r/(tp_r+td_r)

    # ── Vocab gate ──────────────────────────────────────────────
    lV_diff = logV[None, :] - v0[:, None]
    lV_diff2 = lV_diff ** 2
    vocab_gate = 1.0 + C[:, None] * lV_diff2

    # ── Prediction ──────────────────────────────────────────────
    product = mean_r * vocab_gate
    pred = L[:, None] + A[:, None] * product

    # ── Jacobian ────────────────────────────────────────────────
    d_L = xp.ones_like(pred)
    d_A = product

    # d/d(alpha): -mean_r * (tp_r/(2S)) * logP = -mean_r * sig_p * logP
    d_alpha = A[:, None] * vocab_gate * (-mean_r * sig_p * logP[None, :])

    # d/d(beta): -mean_r * (td_r/(2S)) * logD = -mean_r * sig_d * logD
    d_beta = A[:, None] * vocab_gate * (-mean_r * sig_d * logD[None, :])

    d_C = A[:, None] * mean_r * lV_diff2
    d_v0 = A[:, None] * mean_r * (-2.0 * C[:, None] * lV_diff)

    # d/d(r): dmr_dr = mean_r * [(-1/r²)*log_S + (1/r)*(sig_p*log_term_p + sig_d*log_term_d)]
    # where log_term_p = -alpha*logP, log_term_d = -beta*logD
    log_term_p = -alpha[:, None] * logP[None, :]
    log_term_d = -beta[:, None] * logD[None, :]
    dlogS_dr = sig_p * log_term_p + sig_d * log_term_d  # = (1/S)*dS/dr
    dmr_dr = mean_r * (
        -inv_r[:, None] ** 2 * xp.clip(log_S, -_LOG_CLIP, _LOG_CLIP)
        + inv_r[:, None] * dlogS_dr
    )
    d_r = A[:, None] * vocab_gate * dmr_dr

    jac = ops.stack([d_L, d_A, d_alpha, d_beta, d_C, d_v0, d_r], axis=-1)
    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_3 (7p): L0 + ((a * P^-alpha)^q + (b * (D * V^phi)^-beta)^q)^(1/q)
# Generalized q-mean of two Chinchilla-style terms
# theta: [L0, a, alpha, b, beta, phi, q]
def sl_3(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)

    P = ops.clamp_min(X[:, 0], _EPS)
    V = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    L0, a, alpha, b, beta, phi, q = [theta[:, i] for i in range(7)]

    logP = xp.log(P)
    logV = xp.log(V)
    logD = xp.log(D)

    q_safe = ops.clamp_min(q, _EPS)
    inv_q = 1.0 / q_safe

    # ── Log-space for t1, t2 ───────────────────────────────────
    # t1 = a * P^(-alpha) => log_t1 = log(a) + (-alpha)*log(P)
    log_a = xp.log(ops.clamp_min(a, _EPS))
    log_b = xp.log(ops.clamp_min(b, _EPS))

    log_t1 = log_a[:, None] + (-alpha[:, None]) * logP[None, :]

    # t2 = b * (D * V^phi)^(-beta)
    # log_t2 = log(b) + (-beta)*(log(D) + phi*log(V))
    log_eff_D = logD[None, :] + phi[:, None] * logV[None, :]
    log_t2 = log_b[:, None] + (-beta[:, None]) * log_eff_D

    # ── Log-space for S = t1^q + t2^q ─────────────────────────
    # log_t1_q = q * log_t1, log_t2_q = q * log_t2
    log_t1_q = q_safe[:, None] * log_t1
    log_t2_q = q_safe[:, None] * log_t2

    # log_S = logsumexp(log_t1_q, log_t2_q)
    log_max = xp.maximum(log_t1_q, log_t2_q)
    log_S = log_max + xp.log(
        xp.exp(log_t1_q - log_max) + xp.exp(log_t2_q - log_max)
    )

    # combined = S^(1/q) = exp(log_S / q)
    log_combined = xp.clip(log_S * inv_q[:, None], -_LOG_CLIP, _LOG_CLIP)
    combined = xp.exp(log_combined)

    # ── Stable ratios via sigmoid ──────────────────────────────
    # t1_q / S = sigmoid(log_t1_q - log_t2_q)
    # t2_q / S = sigmoid(log_t2_q - log_t1_q)
    diff = xp.clip(log_t1_q - log_t2_q, -_LOG_CLIP, _LOG_CLIP)
    sig_1 = 1.0 / (1.0 + xp.exp(-diff))  # = t1^q / S
    sig_2 = 1.0 - sig_1  # = t2^q / S

    # ── Prediction ─────────────────────────────────────────────
    pred = L0[:, None] + combined

    # ── Jacobian ───────────────────────────────────────────────
    # Shared factor: d(combined)/d(S) * d(S)/d(t_k^q) * d(t_k^q)/d(...)
    # d(combined)/d(S) = combined / (q * S)
    # d(S)/d(t1_q) = 1
    # d(t1_q)/d(theta) = t1_q * q * d(log_t1)/d(theta)
    #
    # Chain: d(combined)/d(log_t1) = combined / (q*S) * t1_q * q
    #                              = combined * (t1_q / S)
    #                              = combined * sig_1
    # Similarly for t2. Note the q factors cancel, so none of the
    # derivatives below carries a leading q.

    d_L0 = xp.ones_like(pred)

    # d/d(a): d(log_t1)/d(a) = 1/a
    # => d(combined)/d(a) = combined * sig_1 * (1/a)
    d_a = combined * sig_1 / a[:, None]

    # d/d(alpha): d(log_t1)/d(alpha) = -logP
    # => d(combined)/d(alpha) = combined * sig_1 * (-logP)
    d_alpha = combined * sig_1 * (-logP[None, :])

    # d/d(b): d(log_t2)/d(b) = 1/b
    d_b = combined * sig_2 / b[:, None]

    # d/d(beta): d(log_t2)/d(beta) = -log_eff_D
    d_beta = combined * sig_2 * (-log_eff_D)

    # d/d(phi): d(log_t2)/d(phi) = (-beta) * logV
    d_phi = combined * sig_2 * (-beta[:, None]) * logV[None, :]

    # d/d(q):
    # combined = S^(1/q), log(combined) = log_S / q
    # d(combined)/d(q) = combined * [ -log_S/q² + (1/q)(1/S)*dS/dq ]
    #
    # dS/dq = t1_q * log_t1 + t2_q * log_t2
    # (1/S)*dS/dq = sig_1 * log_t1 + sig_2 * log_t2
    #
    # Use clipped log values throughout
    log_t1_clip = xp.clip(log_t1, -_LOG_CLIP, _LOG_CLIP)
    log_t2_clip = xp.clip(log_t2, -_LOG_CLIP, _LOG_CLIP)
    log_S_clip = xp.clip(log_S, -_LOG_CLIP, _LOG_CLIP)

    inv_S_dS_dq = sig_1 * log_t1_clip + sig_2 * log_t2_clip

    d_q = combined * (
        -inv_q[:, None] ** 2 * log_S_clip
        + inv_q[:, None] * inv_S_dS_dq
    )

    jac = ops.stack([d_L0, d_a, d_alpha, d_b, d_beta, d_phi, d_q], axis=-1)

    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_4 (7p): L_inf + A * max(P^a, lambda*D^b)^(-d) * V^(-g)
# theta: [L_inf, A, a, b, d, lam, g]
def sl_4(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    P = ops.clamp_min(X[:, 0], _EPS)
    V = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    L_inf, A_p, a, b, d_p, lam, g = [theta[:, i] for i in range(7)]

    logP = xp.log(P)
    logD = xp.log(D)
    logV = xp.log(V)

    # log(P^a) = a*logP, log(lam*D^b) = log(lam) + b*logD
    log_t1 = a[:, None] * logP[None, :]
    log_lam = xp.log(ops.clamp_min(lam, _EPS))
    log_t2 = log_lam[:, None] + b[:, None] * logD[None, :]
    log_max = xp.maximum(log_t1, log_t2)

    # term = A * max(...)^(-d) * V^(-g) = A * exp(-d*log_max - g*logV)
    log_term = xp.log(ops.clamp_min(A_p, _EPS))[:, None] \
        - d_p[:, None] * log_max - g[:, None] * logV[None, :]
    log_term = xp.clip(log_term, -_LOG_CLIP, _LOG_CLIP)
    term = xp.exp(log_term)

    pred = L_inf[:, None] + term

    # Indicator for which branch of the max is active
    ind1 = (log_t1 >= log_t2) * 1.0
    ind2 = 1.0 - ind1

    d_L_inf = xp.ones_like(pred)
    # d/dA = term / A
    d_A = term / ops.clamp_min(A_p, _EPS)[:, None]
    # d/da = term * (-d) * ind1 * logP
    d_a = term * (-d_p[:, None]) * ind1 * logP[None, :]
    # d/db = term * (-d) * ind2 * logD
    d_b = term * (-d_p[:, None]) * ind2 * logD[None, :]
    # d/dd = term * (-log_max)
    d_d = term * (-log_max)
    # d/dlam = term * (-d) * ind2 / lam
    d_lam = term * (-d_p[:, None]) * ind2 / ops.clamp_min(lam, _EPS)[:, None]
    # d/dg = term * (-logV)
    d_g = term * (-logV[None, :])

    jac = ops.stack([d_L_inf, d_A, d_a, d_b, d_d, d_lam, d_g], axis=-1)
    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_5 (7p): p0 * P^p1 * V^p2 * D^p3 + p4 * P^p5 + p6
# theta: [p0, p1, p2, p3, p4, p5, p6]
def sl_5(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    P = ops.clamp_min(X[:, 0], _EPS)
    V = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    p0, p1, p2, p3, p4, p5, p6 = [theta[:, i] for i in range(7)]

    logP = xp.log(P)
    logV = xp.log(V)
    logD = xp.log(D)

    # t1 = p0 * P^p1 * V^p2 * D^p3
    # log|t1| = log|p0| + p1*logP + p2*logV + p3*logD
    log_t1_abs = xp.log(ops.clamp_min(xp.abs(p0), _EPS))[:, None] \
        + p1[:, None] * logP[None, :] \
        + p2[:, None] * logV[None, :] \
        + p3[:, None] * logD[None, :]
    log_t1_abs = xp.clip(log_t1_abs, -_LOG_CLIP, _LOG_CLIP)
    sign_p0 = xp.sign(p0)
    t1 = sign_p0[:, None] * xp.exp(log_t1_abs)

    # t2 = p4 * P^p5
    log_t2_abs = xp.log(ops.clamp_min(xp.abs(p4), _EPS))[:, None] \
        + p5[:, None] * logP[None, :]
    log_t2_abs = xp.clip(log_t2_abs, -_LOG_CLIP, _LOG_CLIP)
    sign_p4 = xp.sign(p4)
    t2 = sign_p4[:, None] * xp.exp(log_t2_abs)

    pred = t1 + t2 + p6[:, None]

    # Power-law parts (unsigned, for Jacobian): P^p1*V^p2*D^p3, P^p5
    pvd = xp.exp(xp.clip(
        p1[:, None] * logP[None, :] + p2[:, None] * logV[None, :] + p3[:, None] * logD[None, :],
        -_LOG_CLIP, _LOG_CLIP))
    pp5 = xp.exp(xp.clip(p5[:, None] * logP[None, :], -_LOG_CLIP, _LOG_CLIP))

    d_p0 = pvd  # d(t1)/d(p0) = P^p1 * V^p2 * D^p3, independent of the sign of p0
    d_p1 = t1 * logP[None, :]
    d_p2 = t1 * logV[None, :]
    d_p3 = t1 * logD[None, :]
    d_p4 = pp5  # d(t2)/d(p4) = P^p5
    d_p5 = t2 * logP[None, :]
    d_p6 = xp.ones_like(pred)

    jac = ops.stack([d_p0, d_p1, d_p2, d_p3, d_p4, d_p5, d_p6], axis=-1)
    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_6 (7p): A * (P * V^k1)^(-alpha) + B * (D * V^k2)^(-beta) + c0
# theta: [A, alpha, k1, B, beta, k2, c0]
def sl_6(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    P = ops.clamp_min(X[:, 0], _EPS)
    V = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    A, alpha, k1, B, beta, k2, c0 = [theta[:, i] for i in range(7)]

    logP = xp.log(P)
    logV = xp.log(V)
    logD = xp.log(D)

    # log_eff_P = logP + k1*logV, log_eff_D = logD + k2*logV
    log_eff_P = logP[None, :] + k1[:, None] * logV[None, :]
    log_eff_D = logD[None, :] + k2[:, None] * logV[None, :]

    # term1 = A * exp(-alpha * log_eff_P)
    log_term1 = xp.log(ops.clamp_min(A, _EPS))[:, None] \
        - alpha[:, None] * log_eff_P
    log_term1 = xp.clip(log_term1, -_LOG_CLIP, _LOG_CLIP)
    term1 = xp.exp(log_term1)

    # term2 = B * exp(-beta * log_eff_D)
    log_term2 = xp.log(ops.clamp_min(B, _EPS))[:, None] \
        - beta[:, None] * log_eff_D
    log_term2 = xp.clip(log_term2, -_LOG_CLIP, _LOG_CLIP)
    term2 = xp.exp(log_term2)

    pred = term1 + term2 + c0[:, None]

    d_A = term1 / ops.clamp_min(A, _EPS)[:, None]
    d_alpha = -term1 * log_eff_P
    d_k1 = -term1 * alpha[:, None] * logV[None, :]
    d_B = term2 / ops.clamp_min(B, _EPS)[:, None]
    d_beta = -term2 * log_eff_D
    d_k2 = -term2 * beta[:, None] * logV[None, :]
    d_c0 = xp.ones_like(pred)

    jac = ops.stack([d_A, d_alpha, d_k1, d_B, d_beta, d_k2, d_c0], axis=-1)
    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_7 (7p): A * P^(-alpha) * D^(-beta) + B * V^gamma * D^(-delta) + c0
# theta: [A, alpha, beta, B, gamma, delta, c0]
def sl_7(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    P = ops.clamp_min(X[:, 0], _EPS)
    V = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    A, alpha, beta, B, gamma, delta, c0 = [theta[:, i] for i in range(7)]

    logP = xp.log(P)
    logV = xp.log(V)
    logD = xp.log(D)

    # t1 = A * P^(-alpha) * D^(-beta) = A * exp(-alpha*logP - beta*logD)
    log_t1 = xp.log(ops.clamp_min(A, _EPS))[:, None] \
        - alpha[:, None] * logP[None, :] \
        - beta[:, None] * logD[None, :]
    log_t1 = xp.clip(log_t1, -_LOG_CLIP, _LOG_CLIP)
    t1 = xp.exp(log_t1)

    # t2 = B * V^gamma * D^(-delta) = B * exp(gamma*logV - delta*logD)
    log_t2 = xp.log(ops.clamp_min(B, _EPS))[:, None] \
        + gamma[:, None] * logV[None, :] \
        - delta[:, None] * logD[None, :]
    log_t2 = xp.clip(log_t2, -_LOG_CLIP, _LOG_CLIP)
    t2 = xp.exp(log_t2)

    pred = t1 + t2 + c0[:, None]

    d_A = t1 / ops.clamp_min(A, _EPS)[:, None]
    d_alpha = -t1 * logP[None, :]
    d_beta = -t1 * logD[None, :]
    d_B = t2 / ops.clamp_min(B, _EPS)[:, None]
    d_gamma = t2 * logV[None, :]
    d_delta = -t2 * logD[None, :]
    d_c0 = xp.ones_like(pred)

    jac = ops.stack([d_A, d_alpha, d_beta, d_B, d_gamma, d_delta, d_c0], axis=-1)
    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_8 (7p): c0 + c1*log(V) + V^beta * (c2 * P^(-alpha) + c3 * D^(-gamma))
# theta: [c0, c1, c2, alpha, c3, gamma, beta]
def sl_8(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    P = ops.clamp_min(X[:, 0], _EPS)
    V = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    c0, c1, c2, alpha, c3, gamma, beta = [theta[:, i] for i in range(7)]

    logP = xp.log(P)
    logV = xp.log(V)
    logD = xp.log(D)

    # V^beta * c2 * P^(-alpha) = c2 * exp(beta*logV - alpha*logP)
    log_vp = beta[:, None] * logV[None, :] - alpha[:, None] * logP[None, :]
    log_vp = xp.clip(log_vp, -_LOG_CLIP, _LOG_CLIP)
    vp = xp.exp(log_vp)  # V^beta * P^(-alpha)

    # V^beta * c3 * D^(-gamma) = c3 * exp(beta*logV - gamma*logD)
    log_vd = beta[:, None] * logV[None, :] - gamma[:, None] * logD[None, :]
    log_vd = xp.clip(log_vd, -_LOG_CLIP, _LOG_CLIP)
    vd = xp.exp(log_vd)  # V^beta * D^(-gamma)

    scaled = c2[:, None] * vp + c3[:, None] * vd
    pred = c0[:, None] + c1[:, None] * logV[None, :] + scaled

    d_c0 = xp.ones_like(pred)
    d_c1 = xp.broadcast_to(logV[None, :], pred.shape) + 0.0  # force copy
    d_c2 = vp
    d_alpha = c2[:, None] * vp * (-logP[None, :])
    d_c3 = vd
    d_gamma = c3[:, None] * vd * (-logD[None, :])
    d_beta = scaled * logV[None, :]

    jac = ops.stack([d_c0, d_c1, d_c2, d_alpha, d_c3, d_gamma, d_beta], axis=-1)
    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_9 (7p): A * P^(-alpha) * D^(-beta) * (1 + gamma*log(V)) + delta * V^epsilon + L_inf
# theta: [A, alpha, beta, gamma, delta, epsilon, L_inf]
def sl_9(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    P = ops.clamp_min(X[:, 0], _EPS)
    V = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    A, alpha, beta, gamma, delta, epsilon, L_inf = [theta[:, i] for i in range(7)]

    logP = xp.log(P)
    logV = xp.log(V)
    logD = xp.log(D)

    # main = A * P^(-alpha) * D^(-beta) = A * exp(-alpha*logP - beta*logD)
    log_main = xp.log(ops.clamp_min(A, _EPS))[:, None] \
        - alpha[:, None] * logP[None, :] \
        - beta[:, None] * logD[None, :]
    log_main = xp.clip(log_main, -_LOG_CLIP, _LOG_CLIP)
    main = xp.exp(log_main)

    vocab_mod = 1.0 + gamma[:, None] * logV[None, :]

    # cross = delta * V^epsilon = delta * exp(epsilon*logV)
    log_cross = xp.log(ops.clamp_min(xp.abs(delta), _EPS))[:, None] \
        + epsilon[:, None] * logV[None, :]
    log_cross = xp.clip(log_cross, -_LOG_CLIP, _LOG_CLIP)
    sign_delta = xp.sign(delta)
    cross = sign_delta[:, None] * xp.exp(log_cross)

    pred = main * vocab_mod + cross + L_inf[:, None]

    d_A = main / ops.clamp_min(A, _EPS)[:, None] * vocab_mod
    d_alpha = main * (-logP[None, :]) * vocab_mod
    d_beta = main * (-logD[None, :]) * vocab_mod
    d_gamma = main * logV[None, :]
    # d/d(delta) = V^epsilon
    v_eps = xp.exp(xp.clip(epsilon[:, None] * logV[None, :], -_LOG_CLIP, _LOG_CLIP))
    d_delta = v_eps
    d_epsilon = cross * logV[None, :]
    d_L_inf = xp.ones_like(pred)

    jac = ops.stack([d_A, d_alpha, d_beta, d_gamma, d_delta, d_epsilon, d_L_inf], axis=-1)
    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


# sl_10 (7p): L_min + exp(a + bP*log(P) + bV1*log(V) + bV2*log(V)^2 + bD*log(D) + bVD*log(V)*log(D))
# theta: [L_min, a, bP, bV1, bV2, bD, bVD]
def sl_10(theta, X, backend: Literal["numpy", "jax", "torch"] = "jax"):
    ops = utils.get_ops(backend)
    xp = ops.xp
    X = ops.asarray(X, atleast_2d=True)
    theta = ops.asarray(theta, atleast_2d=True)
    P = ops.clamp_min(X[:, 0], _EPS)
    V = ops.clamp_min(X[:, 1], _EPS)
    D = ops.clamp_min(X[:, 2], _EPS)
    L_min, a, bP, bV1, bV2, bD, bVD = [theta[:, i] for i in range(7)]

    lP = xp.log(P[None, :])
    lV = xp.log(V[None, :])
    lD = xp.log(D[None, :])

    exponent = (a[:, None] + bP[:, None] * lP + bV1[:, None] * lV
                + bV2[:, None] * lV ** 2 + bD[:, None] * lD
                + bVD[:, None] * lV * lD)
    exponent = xp.clip(exponent, -_LOG_CLIP, _LOG_CLIP)
    exp_val = xp.exp(exponent)

    pred = L_min[:, None] + exp_val

    d_L_min = xp.ones_like(pred)
    d_a = exp_val
    d_bP = exp_val * lP
    d_bV1 = exp_val * lV
    d_bV2 = exp_val * lV ** 2
    d_bD = exp_val * lD
    d_bVD = exp_val * lV * lD

    jac = ops.stack([d_L_min, d_a, d_bP, d_bV1, d_bV2, d_bD, d_bVD], axis=-1)
    if pred.shape[0] == 1:
        return pred[0], jac[0]
    return pred, jac


PARAM_BOUNDS = {
    # sl_1: [c0, A, b, e, g]
    "sl_1": [(-100, 100), (-1e4, 1e4), (-5, 5), (-5, 5), (-5, 5)],
    # sl_2: [L, A, alpha, beta, C, v0, r]
    "sl_2": [(-100, 100), (-1e4, 1e4), (-5, 5), (-5, 5), (-100, 100), (-10, 30), (0.1, 10)],
    # sl_3: [L0, a, alpha, b, beta, phi, q]
    "sl_3": [(-100, 100), (-1e4, 1e4), (-5, 5), (-1e4, 1e4), (-5, 5), (-5, 5), (0.1, 10)],
    # sl_4: [L_inf, A, a, b, d, lam, g]
    "sl_4": [(-100, 100), (-1e4, 1e4), (-5, 5), (-5, 5), (-5, 5), (1e-6, 1e4), (-5, 5)],
    # sl_5: [p0, p1, p2, p3, p4, p5, p6]
    "sl_5": [(-1e4, 1e4), (-5, 5), (-5, 5), (-5, 5), (-1e4, 1e4), (-5, 5), (-100, 100)],
    # sl_6: [A, alpha, k1, B, beta, k2, c0]
    "sl_6": [(-1e4, 1e4), (-5, 5), (-5, 5), (-1e4, 1e4), (-5, 5), (-5, 5), (-100, 100)],
    # sl_7: [A, alpha, beta, B, gamma, delta, c0]
    "sl_7": [(-1e4, 1e4), (-5, 5), (-5, 5), (-1e4, 1e4), (-5, 5), (-5, 5), (-100, 100)],
    # sl_8: [c0, c1, c2, alpha, c3, gamma, beta]
    "sl_8": [(-100, 100), (-100, 100), (-1e4, 1e4), (-5, 5), (-1e4, 1e4), (-5, 5), (-5, 5)],
    # sl_9: [A, alpha, beta, gamma, delta, epsilon, L_inf]
    "sl_9": [(-1e4, 1e4), (-5, 5), (-5, 5), (-100, 100), (-1e4, 1e4), (-5, 5), (-100, 100)],
    # sl_10: [L_min, a, bP, bV1, bV2, bD, bVD] (exp(poly), clipped to [-50, 50])
    "sl_10": [(-100, 100), (-20, 20), (-2, 2), (-2, 2), (-0.1, 0.1), (-2, 2), (-0.1, 0.1)],
}

LAW_REGISTRY = {
    "sl_1": sl_1, "sl_2": sl_2, "sl_3": sl_3, "sl_4": sl_4, "sl_5": sl_5,
    "sl_6": sl_6, "sl_7": sl_7, "sl_8": sl_8, "sl_9": sl_9, "sl_10": sl_10,
}
PARAM_COUNTS = {
    "sl_1": 5, "sl_2": 7, "sl_3": 7, "sl_4": 7, "sl_5": 7,
    "sl_6": 7, "sl_7": 7, "sl_8": 7, "sl_9": 7, "sl_10": 7,
}
vocab_scaling_law/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e94a7f367168596511ee0024fe52acca6ef1a2fceef9c605b6b5b62e748396cb
size 4341
vocab_scaling_law/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:959415949e790c46d30b445536d2a364d804c7c942398320dd0677ed3ecdf633
size 19500
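The train/test splits above are ordinary parquet files, so pandas can read them directly. A short sketch; the commented-out column names are hypothetical, inferred from the docstring's `[P, V, D]` order, so inspect the printed schema before relying on them.

```python
import pandas as pd

train = pd.read_parquet("vocab_scaling_law/train-00000-of-00001.parquet")
test = pd.read_parquet("vocab_scaling_law/test-00000-of-00001.parquet")
print(train.shape, list(train.columns))  # check the real schema first

# Hypothetical names mapping to the documented X order [P, V, D] plus a loss target:
# X = train[["non_vocab_parameters", "vocab_size", "num_characters"]].to_numpy()
# y = train["loss"].to_numpy()
```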