drbh committed
Commit · a80a7c6 · unverified
Parent(s):
Migrated from kernels-community/tinygrad-rms
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +104 -0
- benchmarks/benchmark.py +47 -0
- build/torch210-cu128-x86_64-windows/__init__.py +63 -0
- build/torch210-cu128-x86_64-windows/_ops.py +9 -0
- build/torch210-cu128-x86_64-windows/_tinygrad_rms_cuda_6e9aef6.pyd +3 -0
- build/torch210-cu128-x86_64-windows/metadata.json +21 -0
- build/torch210-cu128-x86_64-windows/tinygrad_rms/__init__.py +26 -0
- build/torch210-cxx11-cu126-aarch64-linux/__init__.py +63 -0
- build/torch210-cxx11-cu126-aarch64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so +3 -0
- build/torch210-cxx11-cu126-aarch64-linux/metadata.json +18 -0
- build/torch210-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py +26 -0
- build/torch210-cxx11-cu126-x86_64-linux/__init__.py +63 -0
- build/torch210-cxx11-cu126-x86_64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so +3 -0
- build/torch210-cxx11-cu126-x86_64-linux/metadata.json +18 -0
- build/torch210-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py +26 -0
- build/torch210-cxx11-cu128-aarch64-linux/__init__.py +63 -0
- build/torch210-cxx11-cu128-aarch64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so +3 -0
- build/torch210-cxx11-cu128-aarch64-linux/metadata.json +21 -0
- build/torch210-cxx11-cu128-aarch64-linux/tinygrad_rms/__init__.py +26 -0
- build/torch210-cxx11-cu128-x86_64-linux/__init__.py +63 -0
- build/torch210-cxx11-cu128-x86_64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so +3 -0
- build/torch210-cxx11-cu128-x86_64-linux/metadata.json +21 -0
- build/torch210-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py +26 -0
- build/torch210-cxx11-cu130-aarch64-linux/__init__.py +63 -0
- build/torch210-cxx11-cu130-aarch64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so +3 -0
- build/torch210-cxx11-cu130-aarch64-linux/metadata.json +19 -0
- build/torch210-cxx11-cu130-aarch64-linux/tinygrad_rms/__init__.py +26 -0
- build/torch210-cxx11-cu130-x86_64-linux/__init__.py +63 -0
- build/torch210-cxx11-cu130-x86_64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so +3 -0
- build/torch210-cxx11-cu130-x86_64-linux/metadata.json +19 -0
- build/torch210-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py +26 -0
- build/torch211-cxx11-cu126-aarch64-linux/__init__.py +63 -0
- build/torch211-cxx11-cu126-aarch64-linux/_ops.py +9 -0
- build/torch211-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so +3 -0
- build/torch211-cxx11-cu126-aarch64-linux/metadata.json +18 -0
- build/torch211-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py +26 -0
- build/torch211-cxx11-cu126-x86_64-linux/__init__.py +63 -0
- build/torch211-cxx11-cu126-x86_64-linux/_ops.py +9 -0
- build/torch211-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so +3 -0
- build/torch211-cxx11-cu126-x86_64-linux/metadata.json +18 -0
- build/torch211-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py +26 -0
- build/torch211-cxx11-cu128-aarch64-linux/__init__.py +63 -0
- build/torch211-cxx11-cu128-aarch64-linux/_ops.py +9 -0
- build/torch211-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so +3 -0
.gitattributes
ADDED
@@ -0,0 +1,104 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cu128-x86_64-windows/_tinygrad_rms_cuda_6e9aef6.pyd filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch211-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch211-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch211-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu129-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch211-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch211-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch211-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch29-cxx11-cu129-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text
benchmarks/benchmark.py
ADDED
@@ -0,0 +1,47 @@
import torch

from kernels.benchmark import Benchmark


def rmsnorm_reference(x: torch.Tensor, eps: float) -> torch.Tensor:
    rms = torch.sqrt(torch.mean(x**2, dim=-1, keepdim=True) + eps)
    return x / rms


class TinygradRmsBenchmark(Benchmark):
    seed: int = 42

    def setup(self):
        batch_size = 32
        seq_len = 512
        hidden_size = 1024
        self.eps = 1e-6

        self.x = torch.randn(
            batch_size, seq_len, hidden_size, device=self.device, dtype=torch.float32
        )
        self.out = torch.empty_like(self.x)

    def benchmark_base(self):
        self.out = self.kernel.tinygrad_rms_norm_simple(self.x, self.eps)

    def verify_base(self) -> torch.Tensor:
        return rmsnorm_reference(self.x, self.eps)

    def setup_large(self):
        # Note: hidden_size must be 1024 (kernel constraint)
        batch_size = 64
        seq_len = 1024
        hidden_size = 1024
        self.eps = 1e-6

        self.x = torch.randn(
            batch_size, seq_len, hidden_size, device=self.device, dtype=torch.float32
        )
        self.out = torch.empty_like(self.x)

    def benchmark_large(self):
        self.out = self.kernel.tinygrad_rms_norm_simple(self.x, self.eps)

    def verify_large(self) -> torch.Tensor:
        return rmsnorm_reference(self.x, self.eps)
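For reference, the kernel can be sanity-checked against the pure-PyTorch reference used by the benchmark above. A minimal sketch, assuming the built package is importable as `tinygrad_rms` (how it is loaded depends on your setup):

import torch

# Assumed import path; every build variant in this commit exposes the same API.
from tinygrad_rms import tinygrad_rms_norm_simple


def rmsnorm_reference(x: torch.Tensor, eps: float) -> torch.Tensor:
    # Same reference as benchmarks/benchmark.py
    rms = torch.sqrt(torch.mean(x**2, dim=-1, keepdim=True) + eps)
    return x / rms


x = torch.randn(32, 512, 1024, device="cuda", dtype=torch.float32)
out = tinygrad_rms_norm_simple(x, 1e-6)
torch.testing.assert_close(out, rmsnorm_reference(x, 1e-6), rtol=1e-4, atol=1e-4)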
build/torch210-cu128-x86_64-windows/__init__.py
ADDED
@@ -0,0 +1,63 @@
from typing import Optional, Tuple

import torch

from ._ops import ops


def tinygrad_rms_norm(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon))

    This implementation uses a two-kernel approach:
    1. Compute 1/sqrt(mean(x^2) + epsilon) for each row
    2. Multiply input by the computed factor

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Tuple of (output tensor, rms_inv tensor)
    """
    if out is None:
        out = torch.empty_like(x)

    hidden_size = x.size(-1)
    num_rows = x.numel() // hidden_size
    rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device)

    ops.tinygrad_rms_norm(out, rms_inv, x, epsilon)
    return out, rms_inv


def tinygrad_rms_norm_simple(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    This is a simpler interface that only returns the normalized output.

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Normalized output tensor
    """
    if out is None:
        out = torch.empty_like(x)

    ops.tinygrad_rms_norm_inplace(out, x, epsilon)
    return out
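As a usage sketch of the two entry points defined above (the import path is an assumption; the function names and semantics come from this file):

import torch

from tinygrad_rms import tinygrad_rms_norm, tinygrad_rms_norm_simple  # assumed import

x = torch.randn(4, 1024, device="cuda")

# Full interface: normalized output plus the per-row 1/sqrt(mean(x^2) + eps) factors.
out, rms_inv = tinygrad_rms_norm(x, epsilon=1e-6)
print(out.shape, rms_inv.shape)  # torch.Size([4, 1024]) torch.Size([4])

# Simple interface: only the normalized output, optionally into a preallocated buffer.
buf = torch.empty_like(x)
out2 = tinygrad_rms_norm_simple(x, epsilon=1e-6, out=buf)
assert out2 is buf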
build/torch210-cu128-x86_64-windows/_ops.py
ADDED
@@ -0,0 +1,9 @@
import torch
from . import _tinygrad_rms_cuda_6e9aef6
ops = torch.ops._tinygrad_rms_cuda_6e9aef6

def add_op_namespace_prefix(op_name: str):
    """
    Prefix op by namespace.
    """
    return f"_tinygrad_rms_cuda_6e9aef6::{op_name}"
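The helper above only builds the fully qualified op name for the compiled namespace; a small illustrative sketch (the import path is an assumption, the namespace string comes from this file):

from tinygrad_rms._ops import add_op_namespace_prefix, ops  # assumed import path

qualified = add_op_namespace_prefix("tinygrad_rms_norm")
print(qualified)  # "_tinygrad_rms_cuda_6e9aef6::tinygrad_rms_norm"

# The same op is reachable through the torch.ops registry once the extension is loaded.
op_handle = ops.tinygrad_rms_norm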
build/torch210-cu128-x86_64-windows/_tinygrad_rms_cuda_6e9aef6.pyd
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2015223a8a6040fa56544f699eab3c9f4723865f663aebf61fbd06d8fd250bea
size 320000
build/torch210-cu128-x86_64-windows/metadata.json
ADDED
@@ -0,0 +1,21 @@
{
  "version": 1,
  "license": "MIT",
  "python-depends": [],
  "backend": {
    "type": "cuda",
    "archs": [
      "10.0",
      "10.1",
      "12.0+PTX",
      "7.0",
      "7.2",
      "7.5",
      "8.0",
      "8.6",
      "8.7",
      "8.9",
      "9.0"
    ]
  }
}
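The `archs` list above records the CUDA compute capabilities the binary was built for; a hedged sketch of checking the local GPU against it (the file path is illustrative):

import json

import torch

# Hypothetical path to one of the metadata.json files in this commit.
with open("build/torch210-cu128-x86_64-windows/metadata.json") as f:
    meta = json.load(f)

archs = {a.replace("+PTX", "") for a in meta["backend"]["archs"]}
major, minor = torch.cuda.get_device_capability()
print(f"{major}.{minor}", "supported:", f"{major}.{minor}" in archs)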
build/torch210-cu128-x86_64-windows/tinygrad_rms/__init__.py
ADDED
@@ -0,0 +1,26 @@
import ctypes
import sys

import importlib
from pathlib import Path
from types import ModuleType

def _import_from_path(file_path: Path) -> ModuleType:
    # We cannot use the module name as-is, after adding it to `sys.modules`,
    # it would also be used for other imports. So, we make a module name that
    # depends on the path for it to be unique using the hex-encoded hash of
    # the path.
    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
    module_name = path_hash
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None:
        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
    module = importlib.util.module_from_spec(spec)
    if module is None:
        raise ImportError(f"Cannot load module {module_name} from spec")
    sys.modules[module_name] = module
    spec.loader.exec_module(module)  # type: ignore
    return module


globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
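The loader above imports the parent directory's `__init__.py` under a unique, path-derived module name so several build variants can coexist in `sys.modules`; a minimal standalone sketch of the same spec-based loading pattern (paths and names are illustrative):

import importlib.util
import sys
from pathlib import Path


def load_module_from(path: Path, name: str):
    # Same importlib machinery used by _import_from_path above, with an explicit name.
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    spec.loader.exec_module(module)
    return module


# Load one build variant without installing it (hypothetical path).
variant = load_module_from(Path("build/torch210-cu128-x86_64-windows/__init__.py"), "tinygrad_rms_win")
out_fn = variant.tinygrad_rms_norm_simple  # public API is now available on the module object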
build/torch210-cxx11-cu126-aarch64-linux/__init__.py
ADDED
@@ -0,0 +1,63 @@
from typing import Optional, Tuple

import torch

from ._ops import ops


def tinygrad_rms_norm(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon))

    This implementation uses a two-kernel approach:
    1. Compute 1/sqrt(mean(x^2) + epsilon) for each row
    2. Multiply input by the computed factor

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Tuple of (output tensor, rms_inv tensor)
    """
    if out is None:
        out = torch.empty_like(x)

    hidden_size = x.size(-1)
    num_rows = x.numel() // hidden_size
    rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device)

    ops.tinygrad_rms_norm(out, rms_inv, x, epsilon)
    return out, rms_inv


def tinygrad_rms_norm_simple(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    This is a simpler interface that only returns the normalized output.

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Normalized output tensor
    """
    if out is None:
        out = torch.empty_like(x)

    ops.tinygrad_rms_norm_inplace(out, x, epsilon)
    return out
build/torch210-cxx11-cu126-aarch64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
import torch
from . import _tinygrad_rms_cuda_6eb00dc
ops = torch.ops._tinygrad_rms_cuda_6eb00dc

def add_op_namespace_prefix(op_name: str):
    """
    Prefix op by namespace.
    """
    return f"_tinygrad_rms_cuda_6eb00dc::{op_name}"
build/torch210-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9a26e93d9a6daa5741ae1b976db32aadf2a9342f67211cc6ab1caa5348406e75
size 2241168
build/torch210-cxx11-cu126-aarch64-linux/metadata.json
ADDED
@@ -0,0 +1,18 @@
{
  "version": 1,
  "license": "MIT",
  "python-depends": [],
  "backend": {
    "type": "cuda",
    "archs": [
      "7.0",
      "7.2",
      "7.5",
      "8.0",
      "8.6",
      "8.7",
      "8.9",
      "9.0+PTX"
    ]
  }
}
build/torch210-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py
ADDED
@@ -0,0 +1,26 @@
import ctypes
import importlib.util
import sys
from pathlib import Path
from types import ModuleType


def _import_from_path(file_path: Path) -> ModuleType:
    # We cannot use the module name as-is, after adding it to `sys.modules`,
    # it would also be used for other imports. So, we make a module name that
    # depends on the path for it to be unique using the hex-encoded hash of
    # the path.
    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
    module_name = path_hash
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None:
        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
    module = importlib.util.module_from_spec(spec)
    if module is None:
        raise ImportError(f"Cannot load module {module_name} from spec")
    sys.modules[module_name] = module
    spec.loader.exec_module(module)  # type: ignore
    return module


globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu126-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,63 @@
from typing import Optional, Tuple

import torch

from ._ops import ops


def tinygrad_rms_norm(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon))

    This implementation uses a two-kernel approach:
    1. Compute 1/sqrt(mean(x^2) + epsilon) for each row
    2. Multiply input by the computed factor

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Tuple of (output tensor, rms_inv tensor)
    """
    if out is None:
        out = torch.empty_like(x)

    hidden_size = x.size(-1)
    num_rows = x.numel() // hidden_size
    rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device)

    ops.tinygrad_rms_norm(out, rms_inv, x, epsilon)
    return out, rms_inv


def tinygrad_rms_norm_simple(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    This is a simpler interface that only returns the normalized output.

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Normalized output tensor
    """
    if out is None:
        out = torch.empty_like(x)

    ops.tinygrad_rms_norm_inplace(out, x, epsilon)
    return out
build/torch210-cxx11-cu126-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
import torch
from . import _tinygrad_rms_cuda_6eb00dc
ops = torch.ops._tinygrad_rms_cuda_6eb00dc

def add_op_namespace_prefix(op_name: str):
    """
    Prefix op by namespace.
    """
    return f"_tinygrad_rms_cuda_6eb00dc::{op_name}"
build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b6dc5ab336157c3e6c577ecc8dca3790d4ac9273ae4b38ebd45e7dee0c6c20a5
size 2123424
build/torch210-cxx11-cu126-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,18 @@
{
  "version": 1,
  "license": "MIT",
  "python-depends": [],
  "backend": {
    "type": "cuda",
    "archs": [
      "7.0",
      "7.2",
      "7.5",
      "8.0",
      "8.6",
      "8.7",
      "8.9",
      "9.0+PTX"
    ]
  }
}
build/torch210-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py
ADDED
@@ -0,0 +1,26 @@
import ctypes
import importlib.util
import sys
from pathlib import Path
from types import ModuleType


def _import_from_path(file_path: Path) -> ModuleType:
    # We cannot use the module name as-is, after adding it to `sys.modules`,
    # it would also be used for other imports. So, we make a module name that
    # depends on the path for it to be unique using the hex-encoded hash of
    # the path.
    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
    module_name = path_hash
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None:
        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
    module = importlib.util.module_from_spec(spec)
    if module is None:
        raise ImportError(f"Cannot load module {module_name} from spec")
    sys.modules[module_name] = module
    spec.loader.exec_module(module)  # type: ignore
    return module


globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu128-aarch64-linux/__init__.py
ADDED
@@ -0,0 +1,63 @@
from typing import Optional, Tuple

import torch

from ._ops import ops


def tinygrad_rms_norm(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon))

    This implementation uses a two-kernel approach:
    1. Compute 1/sqrt(mean(x^2) + epsilon) for each row
    2. Multiply input by the computed factor

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Tuple of (output tensor, rms_inv tensor)
    """
    if out is None:
        out = torch.empty_like(x)

    hidden_size = x.size(-1)
    num_rows = x.numel() // hidden_size
    rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device)

    ops.tinygrad_rms_norm(out, rms_inv, x, epsilon)
    return out, rms_inv


def tinygrad_rms_norm_simple(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    This is a simpler interface that only returns the normalized output.

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Normalized output tensor
    """
    if out is None:
        out = torch.empty_like(x)

    ops.tinygrad_rms_norm_inplace(out, x, epsilon)
    return out
build/torch210-cxx11-cu128-aarch64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
import torch
from . import _tinygrad_rms_cuda_6eb00dc
ops = torch.ops._tinygrad_rms_cuda_6eb00dc

def add_op_namespace_prefix(op_name: str):
    """
    Prefix op by namespace.
    """
    return f"_tinygrad_rms_cuda_6eb00dc::{op_name}"
build/torch210-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e05735521a2de0896d311bbfdda83b8d5c813e1498597b6613728a029f969665
size 2372360
build/torch210-cxx11-cu128-aarch64-linux/metadata.json
ADDED
@@ -0,0 +1,21 @@
{
  "version": 1,
  "license": "MIT",
  "python-depends": [],
  "backend": {
    "type": "cuda",
    "archs": [
      "10.0",
      "10.1",
      "12.0+PTX",
      "7.0",
      "7.2",
      "7.5",
      "8.0",
      "8.6",
      "8.7",
      "8.9",
      "9.0"
    ]
  }
}
build/torch210-cxx11-cu128-aarch64-linux/tinygrad_rms/__init__.py
ADDED
@@ -0,0 +1,26 @@
import ctypes
import importlib.util
import sys
from pathlib import Path
from types import ModuleType


def _import_from_path(file_path: Path) -> ModuleType:
    # We cannot use the module name as-is, after adding it to `sys.modules`,
    # it would also be used for other imports. So, we make a module name that
    # depends on the path for it to be unique using the hex-encoded hash of
    # the path.
    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
    module_name = path_hash
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None:
        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
    module = importlib.util.module_from_spec(spec)
    if module is None:
        raise ImportError(f"Cannot load module {module_name} from spec")
    sys.modules[module_name] = module
    spec.loader.exec_module(module)  # type: ignore
    return module


globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu128-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,63 @@
from typing import Optional, Tuple

import torch

from ._ops import ops


def tinygrad_rms_norm(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon))

    This implementation uses a two-kernel approach:
    1. Compute 1/sqrt(mean(x^2) + epsilon) for each row
    2. Multiply input by the computed factor

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Tuple of (output tensor, rms_inv tensor)
    """
    if out is None:
        out = torch.empty_like(x)

    hidden_size = x.size(-1)
    num_rows = x.numel() // hidden_size
    rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device)

    ops.tinygrad_rms_norm(out, rms_inv, x, epsilon)
    return out, rms_inv


def tinygrad_rms_norm_simple(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    This is a simpler interface that only returns the normalized output.

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Normalized output tensor
    """
    if out is None:
        out = torch.empty_like(x)

    ops.tinygrad_rms_norm_inplace(out, x, epsilon)
    return out
build/torch210-cxx11-cu128-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
import torch
from . import _tinygrad_rms_cuda_6eb00dc
ops = torch.ops._tinygrad_rms_cuda_6eb00dc

def add_op_namespace_prefix(op_name: str):
    """
    Prefix op by namespace.
    """
    return f"_tinygrad_rms_cuda_6eb00dc::{op_name}"
build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:645b392a5deadd70a5405bbceeb2161c9b852cb0a303b826e8ee633ee4443883
size 2244040
build/torch210-cxx11-cu128-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,21 @@
{
  "version": 1,
  "license": "MIT",
  "python-depends": [],
  "backend": {
    "type": "cuda",
    "archs": [
      "10.0",
      "10.1",
      "12.0+PTX",
      "7.0",
      "7.2",
      "7.5",
      "8.0",
      "8.6",
      "8.7",
      "8.9",
      "9.0"
    ]
  }
}
build/torch210-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py
ADDED
@@ -0,0 +1,26 @@
import ctypes
import importlib.util
import sys
from pathlib import Path
from types import ModuleType


def _import_from_path(file_path: Path) -> ModuleType:
    # We cannot use the module name as-is, after adding it to `sys.modules`,
    # it would also be used for other imports. So, we make a module name that
    # depends on the path for it to be unique using the hex-encoded hash of
    # the path.
    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
    module_name = path_hash
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None:
        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
    module = importlib.util.module_from_spec(spec)
    if module is None:
        raise ImportError(f"Cannot load module {module_name} from spec")
    sys.modules[module_name] = module
    spec.loader.exec_module(module)  # type: ignore
    return module


globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu130-aarch64-linux/__init__.py
ADDED
@@ -0,0 +1,63 @@
from typing import Optional, Tuple

import torch

from ._ops import ops


def tinygrad_rms_norm(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon))

    This implementation uses a two-kernel approach:
    1. Compute 1/sqrt(mean(x^2) + epsilon) for each row
    2. Multiply input by the computed factor

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Tuple of (output tensor, rms_inv tensor)
    """
    if out is None:
        out = torch.empty_like(x)

    hidden_size = x.size(-1)
    num_rows = x.numel() // hidden_size
    rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device)

    ops.tinygrad_rms_norm(out, rms_inv, x, epsilon)
    return out, rms_inv


def tinygrad_rms_norm_simple(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    This is a simpler interface that only returns the normalized output.

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Normalized output tensor
    """
    if out is None:
        out = torch.empty_like(x)

    ops.tinygrad_rms_norm_inplace(out, x, epsilon)
    return out
build/torch210-cxx11-cu130-aarch64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
import torch
from . import _tinygrad_rms_cuda_6eb00dc
ops = torch.ops._tinygrad_rms_cuda_6eb00dc

def add_op_namespace_prefix(op_name: str):
    """
    Prefix op by namespace.
    """
    return f"_tinygrad_rms_cuda_6eb00dc::{op_name}"
build/torch210-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dab93449e780276899a05ae1303d2a7c34c859ec5f596592246169999c6879b4
size 2374320
build/torch210-cxx11-cu130-aarch64-linux/metadata.json
ADDED
@@ -0,0 +1,19 @@
{
  "version": 1,
  "license": "MIT",
  "python-depends": [],
  "backend": {
    "type": "cuda",
    "archs": [
      "10.0",
      "11.0",
      "12.0+PTX",
      "7.5",
      "8.0",
      "8.6",
      "8.7",
      "8.9",
      "9.0"
    ]
  }
}
build/torch210-cxx11-cu130-aarch64-linux/tinygrad_rms/__init__.py
ADDED
@@ -0,0 +1,26 @@
import ctypes
import importlib.util
import sys
from pathlib import Path
from types import ModuleType


def _import_from_path(file_path: Path) -> ModuleType:
    # We cannot use the module name as-is, after adding it to `sys.modules`,
    # it would also be used for other imports. So, we make a module name that
    # depends on the path for it to be unique using the hex-encoded hash of
    # the path.
    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
    module_name = path_hash
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None:
        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
    module = importlib.util.module_from_spec(spec)
    if module is None:
        raise ImportError(f"Cannot load module {module_name} from spec")
    sys.modules[module_name] = module
    spec.loader.exec_module(module)  # type: ignore
    return module


globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu130-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,63 @@
from typing import Optional, Tuple

import torch

from ._ops import ops


def tinygrad_rms_norm(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon))

    This implementation uses a two-kernel approach:
    1. Compute 1/sqrt(mean(x^2) + epsilon) for each row
    2. Multiply input by the computed factor

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Tuple of (output tensor, rms_inv tensor)
    """
    if out is None:
        out = torch.empty_like(x)

    hidden_size = x.size(-1)
    num_rows = x.numel() // hidden_size
    rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device)

    ops.tinygrad_rms_norm(out, rms_inv, x, epsilon)
    return out, rms_inv


def tinygrad_rms_norm_simple(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    This is a simpler interface that only returns the normalized output.

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Normalized output tensor
    """
    if out is None:
        out = torch.empty_like(x)

    ops.tinygrad_rms_norm_inplace(out, x, epsilon)
    return out
build/torch210-cxx11-cu130-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
import torch
from . import _tinygrad_rms_cuda_6eb00dc
ops = torch.ops._tinygrad_rms_cuda_6eb00dc

def add_op_namespace_prefix(op_name: str):
    """
    Prefix op by namespace.
    """
    return f"_tinygrad_rms_cuda_6eb00dc::{op_name}"
build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3bd783d80e077f2983793e5fdc8a0f5478836851435a8b77f00b186fea64c8f4
size 2245848
build/torch210-cxx11-cu130-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,19 @@
{
  "version": 1,
  "license": "MIT",
  "python-depends": [],
  "backend": {
    "type": "cuda",
    "archs": [
      "10.0",
      "11.0",
      "12.0+PTX",
      "7.5",
      "8.0",
      "8.6",
      "8.7",
      "8.9",
      "9.0"
    ]
  }
}
build/torch210-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py
ADDED
@@ -0,0 +1,26 @@
import ctypes
import importlib.util
import sys
from pathlib import Path
from types import ModuleType


def _import_from_path(file_path: Path) -> ModuleType:
    # We cannot use the module name as-is, after adding it to `sys.modules`,
    # it would also be used for other imports. So, we make a module name that
    # depends on the path for it to be unique using the hex-encoded hash of
    # the path.
    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
    module_name = path_hash
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None:
        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
    module = importlib.util.module_from_spec(spec)
    if module is None:
        raise ImportError(f"Cannot load module {module_name} from spec")
    sys.modules[module_name] = module
    spec.loader.exec_module(module)  # type: ignore
    return module


globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu126-aarch64-linux/__init__.py
ADDED
@@ -0,0 +1,63 @@
from typing import Optional, Tuple

import torch

from ._ops import ops


def tinygrad_rms_norm(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon))

    This implementation uses a two-kernel approach:
    1. Compute 1/sqrt(mean(x^2) + epsilon) for each row
    2. Multiply input by the computed factor

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Tuple of (output tensor, rms_inv tensor)
    """
    if out is None:
        out = torch.empty_like(x)

    hidden_size = x.size(-1)
    num_rows = x.numel() // hidden_size
    rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device)

    ops.tinygrad_rms_norm(out, rms_inv, x, epsilon)
    return out, rms_inv


def tinygrad_rms_norm_simple(
    x: torch.Tensor,
    epsilon: float = 1e-6,
    out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Compute RMSNorm using tinygrad-style CUDA kernels.

    This is a simpler interface that only returns the normalized output.

    Args:
        x: Input tensor of shape (..., hidden_size)
        epsilon: Small constant for numerical stability
        out: Optional pre-allocated output tensor

    Returns:
        Normalized output tensor
    """
    if out is None:
        out = torch.empty_like(x)

    ops.tinygrad_rms_norm_inplace(out, x, epsilon)
    return out
build/torch211-cxx11-cu126-aarch64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
import torch
from . import _tinygrad_rms_cuda_6eb00dc
ops = torch.ops._tinygrad_rms_cuda_6eb00dc

def add_op_namespace_prefix(op_name: str):
    """
    Prefix op by namespace.
    """
    return f"_tinygrad_rms_cuda_6eb00dc::{op_name}"
build/torch211-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2b0b368d677277876427af5179c177f36d614fc7787e258b582bca5ac653b332
size 2237384
build/torch211-cxx11-cu126-aarch64-linux/metadata.json
ADDED
@@ -0,0 +1,18 @@
+{
+  "version": 1,
+  "license": "MIT",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
build/torch211-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
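As an aside, the path-based import shim above can be exercised on its own. The snippet below is a minimal, self-contained illustration (the temp file, module contents, and variable names are invented for this example) showing that a module loaded this way is registered in sys.modules under a hash-derived name rather than a package name.

import sys
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    # Write a throwaway module to import by path.
    mod_path = Path(tmp) / "demo_mod.py"
    mod_path.write_text("ANSWER = 42\n")

    # Assumes _import_from_path (defined above) is in scope.
    demo = _import_from_path(mod_path)
    print(demo.ANSWER)                    # -> 42
    print(demo.__name__ in sys.modules)   # -> True, under the hash-derived name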
build/torch211-cxx11-cu126-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,63 @@
+from typing import Optional, Tuple
+
+import torch
+
+from ._ops import ops
+
+
+def tinygrad_rms_norm(
+    x: torch.Tensor,
+    epsilon: float = 1e-6,
+    out: Optional[torch.Tensor] = None,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+    """
+    Compute RMSNorm using tinygrad-style CUDA kernels.
+
+    RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon))
+
+    This implementation uses a two-kernel approach:
+    1. Compute 1/sqrt(mean(x^2) + epsilon) for each row
+    2. Multiply input by the computed factor
+
+    Args:
+        x: Input tensor of shape (..., hidden_size)
+        epsilon: Small constant for numerical stability
+        out: Optional pre-allocated output tensor
+
+    Returns:
+        Tuple of (output tensor, rms_inv tensor)
+    """
+    if out is None:
+        out = torch.empty_like(x)
+
+    hidden_size = x.size(-1)
+    num_rows = x.numel() // hidden_size
+    rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device)
+
+    ops.tinygrad_rms_norm(out, rms_inv, x, epsilon)
+    return out, rms_inv
+
+
+def tinygrad_rms_norm_simple(
+    x: torch.Tensor,
+    epsilon: float = 1e-6,
+    out: Optional[torch.Tensor] = None,
+) -> torch.Tensor:
+    """
+    Compute RMSNorm using tinygrad-style CUDA kernels.
+
+    This is a simpler interface that only returns the normalized output.
+
+    Args:
+        x: Input tensor of shape (..., hidden_size)
+        epsilon: Small constant for numerical stability
+        out: Optional pre-allocated output tensor
+
+    Returns:
+        Normalized output tensor
+    """
+    if out is None:
+        out = torch.empty_like(x)
+
+    ops.tinygrad_rms_norm_inplace(out, x, epsilon)
+    return out
build/torch211-cxx11-cu126-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _tinygrad_rms_cuda_6eb00dc
+ops = torch.ops._tinygrad_rms_cuda_6eb00dc
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_tinygrad_rms_cuda_6eb00dc::{op_name}"
build/torch211-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6394aee38bd05dd68b98ab6e87ff95748c1662dad1b85178c475cd76de7cbf4
+size 2112256
build/torch211-cxx11-cu126-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,18 @@
+{
+  "version": 1,
+  "license": "MIT",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
build/torch211-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu128-aarch64-linux/__init__.py
ADDED
@@ -0,0 +1,63 @@
+from typing import Optional, Tuple
+
+import torch
+
+from ._ops import ops
+
+
+def tinygrad_rms_norm(
+    x: torch.Tensor,
+    epsilon: float = 1e-6,
+    out: Optional[torch.Tensor] = None,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+    """
+    Compute RMSNorm using tinygrad-style CUDA kernels.
+
+    RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon))
+
+    This implementation uses a two-kernel approach:
+    1. Compute 1/sqrt(mean(x^2) + epsilon) for each row
+    2. Multiply input by the computed factor
+
+    Args:
+        x: Input tensor of shape (..., hidden_size)
+        epsilon: Small constant for numerical stability
+        out: Optional pre-allocated output tensor
+
+    Returns:
+        Tuple of (output tensor, rms_inv tensor)
+    """
+    if out is None:
+        out = torch.empty_like(x)
+
+    hidden_size = x.size(-1)
+    num_rows = x.numel() // hidden_size
+    rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device)
+
+    ops.tinygrad_rms_norm(out, rms_inv, x, epsilon)
+    return out, rms_inv
+
+
+def tinygrad_rms_norm_simple(
+    x: torch.Tensor,
+    epsilon: float = 1e-6,
+    out: Optional[torch.Tensor] = None,
+) -> torch.Tensor:
+    """
+    Compute RMSNorm using tinygrad-style CUDA kernels.
+
+    This is a simpler interface that only returns the normalized output.
+
+    Args:
+        x: Input tensor of shape (..., hidden_size)
+        epsilon: Small constant for numerical stability
+        out: Optional pre-allocated output tensor
+
+    Returns:
+        Normalized output tensor
+    """
+    if out is None:
+        out = torch.empty_like(x)
+
+    ops.tinygrad_rms_norm_inplace(out, x, epsilon)
+    return out
build/torch211-cxx11-cu128-aarch64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _tinygrad_rms_cuda_6eb00dc
+ops = torch.ops._tinygrad_rms_cuda_6eb00dc
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_tinygrad_rms_cuda_6eb00dc::{op_name}"
build/torch211-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7a77fa708df07fe1cd9181db9b445dd9a2535bef9968d170c437868f6cca5a3
+size 2368568