drbh committed
Commit 357c41f (unverified) · 0 Parent(s)
Migrated from kernels-community/rmsnorm
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +100 -0
- README.md +5 -0
- build/torch210-cxx11-cpu-x86_64-linux/__init__.py +27 -0
- build/torch210-cxx11-cpu-x86_64-linux/_ops.py +9 -0
- build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_1a02f6f.abi3.so +3 -0
- build/torch210-cxx11-cpu-x86_64-linux/layers.py +59 -0
- build/torch210-cxx11-cpu-x86_64-linux/metadata.json +8 -0
- build/torch210-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py +26 -0
- build/torch210-cxx11-xpu20253-x86_64-linux/__init__.py +27 -0
- build/torch210-cxx11-xpu20253-x86_64-linux/_ops.py +9 -0
- build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_xpu_1a02f6f.abi3.so +3 -0
- build/torch210-cxx11-xpu20253-x86_64-linux/layers.py +59 -0
- build/torch210-cxx11-xpu20253-x86_64-linux/metadata.json +8 -0
- build/torch210-cxx11-xpu20253-x86_64-linux/rmsnorm/__init__.py +26 -0
- build/torch210-xpu20253-x86_64-windows/__init__.py +27 -0
- build/torch210-xpu20253-x86_64-windows/_ops.py +9 -0
- build/torch210-xpu20253-x86_64-windows/_rmsnorm_xpu_2aa36b6.pyd +3 -0
- build/torch210-xpu20253-x86_64-windows/layers.py +59 -0
- build/torch210-xpu20253-x86_64-windows/metadata.json +5 -0
- build/torch210-xpu20253-x86_64-windows/rmsnorm/__init__.py +26 -0
- build/torch211-cxx11-cpu-x86_64-linux/__init__.py +27 -0
- build/torch211-cxx11-cpu-x86_64-linux/_ops.py +9 -0
- build/torch211-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_1a02f6f.abi3.so +3 -0
- build/torch211-cxx11-cpu-x86_64-linux/layers.py +59 -0
- build/torch211-cxx11-cpu-x86_64-linux/metadata.json +8 -0
- build/torch211-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py +26 -0
- build/torch211-cxx11-xpu20253-x86_64-linux/__init__.py +27 -0
- build/torch211-cxx11-xpu20253-x86_64-linux/_ops.py +9 -0
- build/torch211-cxx11-xpu20253-x86_64-linux/_rmsnorm_xpu_1a02f6f.abi3.so +3 -0
- build/torch211-cxx11-xpu20253-x86_64-linux/layers.py +59 -0
- build/torch211-cxx11-xpu20253-x86_64-linux/metadata.json +8 -0
- build/torch211-cxx11-xpu20253-x86_64-linux/rmsnorm/__init__.py +26 -0
- build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/__init__.py +14 -0
- build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/__pycache__/__init__.cpython-313.pyc +0 -0
- build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/__pycache__/_ops.cpython-313.pyc +0 -0
- build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/__pycache__/layers.cpython-313.pyc +0 -0
- build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/_ops.py +9 -0
- build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/_rmsnorm_0d12ee5.abi3.so +3 -0
- build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/layers.py +36 -0
- build/torch28-cxx11-cpu-x86_64-linux/__init__.py +27 -0
- build/torch28-cxx11-cpu-x86_64-linux/_ops.py +9 -0
- build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so +3 -0
- build/torch28-cxx11-cpu-x86_64-linux/layers.py +59 -0
- build/torch28-cxx11-cpu-x86_64-linux/metadata.json +4 -0
- build/torch28-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py +26 -0
- build/torch28-cxx11-xpu20251-x86_64-linux/__init__.py +27 -0
- build/torch28-cxx11-xpu20251-x86_64-linux/_ops.py +9 -0
- build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_235cde1.abi3.so +3 -0
- build/torch28-cxx11-xpu20251-x86_64-linux/layers.py +59 -0
- build/torch28-cxx11-xpu20251-x86_64-linux/metadata.json +4 -0
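Each build directory packages the same Python surface (__init__.py, _ops.py, layers.py, and a re-exporting rmsnorm/ shim) next to one compiled extension, one variant per torch version / backend / platform triple. As a rough sketch of how such a repo is consumed (assuming the Hugging Face `kernels` helper library and the repo id kernels-community/rmsnorm named in the commit message), the loader picks the build directory matching the local torch and backend:

import torch
from kernels import get_kernel  # assumption: the HF `kernels` loader library

rmsnorm = get_kernel("kernels-community/rmsnorm")  # resolves to one build/<variant>/

x = torch.randn(2, 16, 64)
weight = torch.ones(64)
out = rmsnorm.apply_rms_norm(x, weight, 1e-6)  # see the __init__.py diffs below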
.gitattributes
ADDED
@@ -0,0 +1,100 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/_rmsnorm_0d12ee5.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-xpu20251-x86_64-linux/rmsnorm/_rmsnorm_0d12ee5.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/rmsnorm/_rmsnorm_0d12ee5.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_a7a4369.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_a7a4369.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_a7a4369.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_a7a4369.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_fb26d8c.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_fb26d8c.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_fb26d8c.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_fb26d8c.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_fb26d8c.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_fb26d8c.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_7606158.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_7606158.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_7606158.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_7606158.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_7606158.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_7606158.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_4367ce1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_4367ce1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_4367ce1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_4367ce1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_4367ce1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_4367ce1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_a8702c9.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_a8702c9.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_a8702c9.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_a8702c9.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_a8702c9.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_a8702c9.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_fd30c0c.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_fd30c0c.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_fd30c0c.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_fd30c0c.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-xpu20252-x86_64-windows/rmsnorm/_rmsnorm_96c9886.pyd filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_ce2b5cc.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_ce2b5cc.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_ce2b5cc.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_ce2b5cc.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-xpu20253-x86_64-windows/rmsnorm/_rmsnorm_4cd2f5b.pyd filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_7bbf693.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_xpu_7bbf693.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_7bbf693.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_xpu_7bbf693.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-xpu20253-x86_64-windows/_rmsnorm_xpu_0f8f3b4.pyd filter=lfs diff=lfs merge=lfs -text
+build/torch210-xpu20253-x86_64-windows/_rmsnorm_xpu_2aa36b6.pyd filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_b3d66c6.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_xpu_b3d66c6.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_b3d66c6.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_xpu_b3d66c6.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_cec90b8.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_xpu_cec90b8.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_cec90b8.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_xpu_cec90b8.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_1a02f6f.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_xpu_1a02f6f.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_1a02f6f.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-xpu20253-x86_64-linux/_rmsnorm_xpu_1a02f6f.abi3.so filter=lfs diff=lfs merge=lfs -text
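The rules above route every archive/tensor pattern and each versioned .so/.pyd binary through Git LFS, so the repository stores small pointer files instead of the binaries themselves. A tiny sanity-check sketch (plain Python, run from the repo root; the count of 100 comes from this hunk):

from pathlib import Path

lfs_patterns = [
    line.split()[0]
    for line in Path(".gitattributes").read_text().splitlines()
    if "filter=lfs" in line
]
print(len(lfs_patterns))  # expected: 100 after this commit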
README.md
ADDED
@@ -0,0 +1,5 @@
+---
+tags:
+- kernels
+- cuda
+---
build/torch210-cxx11-cpu-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,27 @@
+from . import layers
+
+from ._ops import ops
+
+
+def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
+    return ops.apply_rms_norm(
+        input,
+        weight,
+        eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
+    )
+
+__all__ = ["layers", "apply_rms_norm", "apply_rms_norm_backward"]
+
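For reference, the operation behind these wrappers is standard RMSNorm (the kernel source itself is not part of this diff; this assumes the usual definition). For a row $x \in \mathbb{R}^H$ with weight $w$:

$$\mathrm{rstd} = \Big(\tfrac{1}{H}\sum_{i=1}^{H} x_i^2 + \varepsilon\Big)^{-1/2}, \qquad y_i = w_i \, x_i \cdot \mathrm{rstd},$$

which is why the op returns the pair [output, rstd]: caching rstd lets the backward kernel skip recomputing the reduction.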
build/torch210-cxx11-cpu-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rmsnorm_cpu_1a02f6f
+ops = torch.ops._rmsnorm_cpu_1a02f6f
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rmsnorm_cpu_1a02f6f::{op_name}"
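The hex suffix in _rmsnorm_cpu_1a02f6f is a per-build hash, presumably so that ops from different builds of the kernel cannot collide in the process-wide torch.ops registry. add_op_namespace_prefix simply rebuilds the fully qualified op name; a sketch of what it yields (nothing new is registered by this commit):

from ._ops import ops, add_op_namespace_prefix

qualname = add_op_namespace_prefix("apply_rms_norm")
# qualname == "_rmsnorm_cpu_1a02f6f::apply_rms_norm", the same op reachable as
# torch.ops._rmsnorm_cpu_1a02f6f.apply_rms_norm (i.e. ops.apply_rms_norm), in the
# schema-string form expected by torch.library-style APIs.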
build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_1a02f6f.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c102259696d99bbe9d4c686b4293195548faa4856123a358d44aab3d90148620
+size 2006072
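What git stores for each binary is only this three-line LFS pointer. A sketch for reading one back (path as in this build directory):

from pathlib import Path

ptr = Path("build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_1a02f6f.abi3.so")
fields = dict(line.split(" ", 1) for line in ptr.read_text().splitlines())
assert fields["version"].startswith("https://git-lfs.github.com/spec/v1")
print(fields["oid"], fields["size"])  # sha256:c1022596..., 2006072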
build/torch210-cxx11-cpu-x86_64-linux/layers.py
ADDED
@@ -0,0 +1,59 @@
+import torch
+from ._ops import ops
+
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
+class RMSNorm(torch.nn.Module):
+    """
+    RMSNorm module that uses the optimized RMSNormFunction.
+
+    Args:
+        hidden_size (int): The size of the hidden dimension.
+        eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+        offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+        casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+        in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+    """
+
+
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states):
+        """
+        Apply RMS normalization to the input tensor.
+
+        Args:
+            hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+        Returns:
+            torch.Tensor: Normalized tensor of the same shape as input
+        """
+        return RMSNormFunction.apply(
+            hidden_states,
+            self.weight,
+            self.variance_epsilon,
+        )
+
+__all__ = ["RMSNorm"]
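RMSNorm declares weight and variance_epsilon as bare class annotations and defines no __init__, so a caller (typically a framework that swaps this class in for a model's own norm layer) must supply both. A minimal sketch, assuming the module is wired up by hand:

import torch
from layers import RMSNorm  # assumption: this build's layers.py is on the path

norm = RMSNorm()
norm.weight = torch.nn.Parameter(torch.ones(64))
norm.variance_epsilon = 1e-6

x = torch.randn(2, 8, 64, requires_grad=True)
y = norm(x)         # forward through RMSNormFunction.apply
y.sum().backward()  # backward through ops.apply_rms_norm_backward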
build/torch210-cxx11-cpu-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cpu"
+  }
+}
build/torch210-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
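The rmsnorm/ subpackage is a re-export shim: it executes the build directory's top-level __init__.py under a path-derived module name and copies its globals, so the same file can be loaded from several build directories without colliding in sys.modules. The naming trick in isolation (a sketch):

import ctypes
from pathlib import Path

p = Path("build/torch210-cxx11-cpu-x86_64-linux/__init__.py")
module_name = "{:x}".format(ctypes.c_size_t(hash(p.absolute())).value)
# Unique per path (and per interpreter run, since string hashing is salted),
# which is all that is required of a throwaway sys.modules key.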
build/torch210-cxx11-xpu20253-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,27 @@
+from . import layers
+
+from ._ops import ops
+
+
+def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
+    return ops.apply_rms_norm(
+        input,
+        weight,
+        eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
+    )
+
+__all__ = ["layers", "apply_rms_norm", "apply_rms_norm_backward"]
+
build/torch210-cxx11-xpu20253-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rmsnorm_xpu_1a02f6f
+ops = torch.ops._rmsnorm_xpu_1a02f6f
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rmsnorm_xpu_1a02f6f::{op_name}"
build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_xpu_1a02f6f.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a87f0910ab215646183ecd9f4b2cbc5be6c72c3eee20d167f42f71c14629e65
+size 104793360
build/torch210-cxx11-xpu20253-x86_64-linux/layers.py
ADDED
@@ -0,0 +1,59 @@
+import torch
+from ._ops import ops
+
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
+class RMSNorm(torch.nn.Module):
+    """
+    RMSNorm module that uses the optimized RMSNormFunction.
+
+    Args:
+        hidden_size (int): The size of the hidden dimension.
+        eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+        offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+        casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+        in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+    """
+
+
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states):
+        """
+        Apply RMS normalization to the input tensor.
+
+        Args:
+            hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+        Returns:
+            torch.Tensor: Normalized tensor of the same shape as input
+        """
+        return RMSNormFunction.apply(
+            hidden_states,
+            self.weight,
+            self.variance_epsilon,
+        )
+
+__all__ = ["RMSNorm"]
build/torch210-cxx11-xpu20253-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "xpu"
+  }
+}
build/torch210-cxx11-xpu20253-x86_64-linux/rmsnorm/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-xpu20253-x86_64-windows/__init__.py
ADDED
@@ -0,0 +1,27 @@
+from . import layers
+
+from ._ops import ops
+
+
+def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
+    return ops.apply_rms_norm(
+        input,
+        weight,
+        eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
+    )
+
+__all__ = ["layers", "apply_rms_norm", "apply_rms_norm_backward"]
+
build/torch210-xpu20253-x86_64-windows/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rmsnorm_xpu_2aa36b6
+ops = torch.ops._rmsnorm_xpu_2aa36b6
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rmsnorm_xpu_2aa36b6::{op_name}"
build/torch210-xpu20253-x86_64-windows/_rmsnorm_xpu_2aa36b6.pyd
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:690752b7e809e03b7be6d8f5521080ea84115db1078cf6a0010597612e5844d7
+size 2363904
build/torch210-xpu20253-x86_64-windows/layers.py
ADDED
@@ -0,0 +1,59 @@
+import torch
+from ._ops import ops
+
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
+class RMSNorm(torch.nn.Module):
+    """
+    RMSNorm module that uses the optimized RMSNormFunction.
+
+    Args:
+        hidden_size (int): The size of the hidden dimension.
+        eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+        offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+        casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+        in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+    """
+
+
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states):
+        """
+        Apply RMS normalization to the input tensor.
+
+        Args:
+            hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+        Returns:
+            torch.Tensor: Normalized tensor of the same shape as input
+        """
+        return RMSNormFunction.apply(
+            hidden_states,
+            self.weight,
+            self.variance_epsilon,
+        )
+
+__all__ = ["RMSNorm"]
build/torch210-xpu20253-x86_64-windows/metadata.json
ADDED
@@ -0,0 +1,5 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": []
+}
build/torch210-xpu20253-x86_64-windows/rmsnorm/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib.util
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cpu-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,27 @@
+from . import layers
+
+from ._ops import ops
+
+
+def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
+    return ops.apply_rms_norm(
+        input,
+        weight,
+        eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
+    )
+
+__all__ = ["layers", "apply_rms_norm", "apply_rms_norm_backward"]
+
build/torch211-cxx11-cpu-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rmsnorm_cpu_1a02f6f
+ops = torch.ops._rmsnorm_cpu_1a02f6f
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rmsnorm_cpu_1a02f6f::{op_name}"
build/torch211-cxx11-cpu-x86_64-linux/_rmsnorm_cpu_1a02f6f.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:439ac1a1bc4a6095844795cbccd7f2137c101bce3e3415bcebb3fd2b0dfcb97b
+size 2001976
build/torch211-cxx11-cpu-x86_64-linux/layers.py
ADDED
@@ -0,0 +1,59 @@
+import torch
+from ._ops import ops
+
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
+class RMSNorm(torch.nn.Module):
+    """
+    RMSNorm module that uses the optimized RMSNormFunction.
+
+    Args:
+        hidden_size (int): The size of the hidden dimension.
+        eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+        offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+        casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+        in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+    """
+
+
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states):
+        """
+        Apply RMS normalization to the input tensor.
+
+        Args:
+            hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+        Returns:
+            torch.Tensor: Normalized tensor of the same shape as input
+        """
+        return RMSNormFunction.apply(
+            hidden_states,
+            self.weight,
+            self.variance_epsilon,
+        )
+
+__all__ = ["RMSNorm"]
build/torch211-cxx11-cpu-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cpu"
+  }
+}
build/torch211-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-xpu20253-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,27 @@
+from . import layers
+
+from ._ops import ops
+
+
+def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
+    return ops.apply_rms_norm(
+        input,
+        weight,
+        eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
+    )
+
+__all__ = ["layers", "apply_rms_norm", "apply_rms_norm_backward"]
+
build/torch211-cxx11-xpu20253-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rmsnorm_xpu_1a02f6f
+ops = torch.ops._rmsnorm_xpu_1a02f6f
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rmsnorm_xpu_1a02f6f::{op_name}"
build/torch211-cxx11-xpu20253-x86_64-linux/_rmsnorm_xpu_1a02f6f.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:153aa232ee4f342e92075140aa796e86ccd2f55f07d27bcad90890ed2fac57bf
+size 104793120
build/torch211-cxx11-xpu20253-x86_64-linux/layers.py
ADDED
@@ -0,0 +1,59 @@
+import torch
+from ._ops import ops
+
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
+class RMSNorm(torch.nn.Module):
+    """
+    RMSNorm module that uses the optimized RMSNormFunction.
+
+    Args:
+        hidden_size (int): The size of the hidden dimension.
+        eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+        offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+        casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+        in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+    """
+
+
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states):
+        """
+        Apply RMS normalization to the input tensor.
+
+        Args:
+            hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+        Returns:
+            torch.Tensor: Normalized tensor of the same shape as input
+        """
+        return RMSNormFunction.apply(
+            hidden_states,
+            self.weight,
+            self.variance_epsilon,
+        )
+
+__all__ = ["RMSNorm"]
build/torch211-cxx11-xpu20253-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "xpu"
+  }
+}
build/torch211-cxx11-xpu20253-x86_64-linux/rmsnorm/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/__init__.py
ADDED
@@ -0,0 +1,14 @@
+from . import layers
+
+from ._ops import ops
+
+
+def apply_rms_norm(input, weight, eps):
+    return ops.apply_rms_norm(
+        input,
+        weight,
+        eps,
+    )
+
+__all__ = ["layers", "apply_rms_norm"]
+
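This older torch27 build predates the two-output op: apply_rms_norm returns the raw op result and no backward wrapper is exported, whereas the newer builds above slice [0] because their op also returns rstd. Code that must work against both generations could normalize the return value, e.g. (hedged sketch):

def rms_norm_output(mod, x, w, eps):
    # Newer builds' wrapper already slices [0]; an older op's return shape may differ.
    result = mod.apply_rms_norm(x, w, eps)
    return result[0] if isinstance(result, (list, tuple)) else result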
build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/__pycache__/__init__.cpython-313.pyc
ADDED
Binary file (491 Bytes)
build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/__pycache__/_ops.cpython-313.pyc
ADDED
Binary file (520 Bytes)
build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/__pycache__/layers.cpython-313.pyc
ADDED
Binary file (1.68 kB)
build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rmsnorm_0d12ee5
+ops = torch.ops._rmsnorm_0d12ee5
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rmsnorm_0d12ee5::{op_name}"
build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/_rmsnorm_0d12ee5.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79eb24cb07a24a3f829ce1d210bd0cbd79badd0cc236710a84e83c15575ddf04
+size 100963504
build/torch27-cxx11-xpu20250-x86_64-linux/rmsnorm/layers.py
ADDED
@@ -0,0 +1,36 @@
+import torch
+from ._ops import ops
+
+class RMSNorm(torch.nn.Module):
+    """
+    RMSNorm module that uses the optimized RMS norm kernel.
+
+    Args:
+        hidden_size (int): The size of the hidden dimension.
+        eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+        offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+        casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+        in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+    """
+
+
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states):
+        """
+        Apply RMS normalization to the input tensor.
+
+        Args:
+            hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+        Returns:
+            torch.Tensor: Normalized tensor of the same shape as input
+        """
+        return ops.apply_rms_norm(
+            hidden_states,
+            self.weight,
+            self.variance_epsilon,
+        )
+
+__all__ = ["RMSNorm"]
build/torch28-cxx11-cpu-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,27 @@
+from . import layers
+
+from ._ops import ops
+
+
+def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
+    return ops.apply_rms_norm(
+        input,
+        weight,
+        eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
+    )
+
+__all__ = ["layers", "apply_rms_norm", "apply_rms_norm_backward"]
+
build/torch28-cxx11-cpu-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rmsnorm_235cde1
+ops = torch.ops._rmsnorm_235cde1
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rmsnorm_235cde1::{op_name}"
build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16c92de9cefabeeadc60ffff87189a1e66ecb9ea19b343570ac55e9d9c7d98fe
+size 156648
build/torch28-cxx11-cpu-x86_64-linux/layers.py
ADDED
@@ -0,0 +1,59 @@
+import torch
+from ._ops import ops
+
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
+class RMSNorm(torch.nn.Module):
+    """
+    RMSNorm module that uses the optimized RMSNormFunction.
+
+    Args:
+        hidden_size (int): The size of the hidden dimension.
+        eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+        offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+        casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+        in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+    """
+
+
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states):
+        """
+        Apply RMS normalization to the input tensor.
+
+        Args:
+            hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+        Returns:
+            torch.Tensor: Normalized tensor of the same shape as input
+        """
+        return RMSNormFunction.apply(
+            hidden_states,
+            self.weight,
+            self.variance_epsilon,
+        )
+
+__all__ = ["RMSNorm"]
build/torch28-cxx11-cpu-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,4 @@
+{
+  "version": 1,
+  "python-depends": []
+}
build/torch28-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib.util
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch28-cxx11-xpu20251-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,27 @@
+from . import layers
+
+from ._ops import ops
+
+
+def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
+    return ops.apply_rms_norm(
+        input,
+        weight,
+        eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
+    )
+
+__all__ = ["layers", "apply_rms_norm", "apply_rms_norm_backward"]
+
build/torch28-cxx11-xpu20251-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rmsnorm_235cde1
+ops = torch.ops._rmsnorm_235cde1
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rmsnorm_235cde1::{op_name}"
build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_235cde1.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77c4b43d63dc74b210633da81630023a6d6e359a7a1115bff55da9f4436053d9
+size 103700632
build/torch28-cxx11-xpu20251-x86_64-linux/layers.py
ADDED
@@ -0,0 +1,59 @@
+import torch
+from ._ops import ops
+
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
+class RMSNorm(torch.nn.Module):
+    """
+    RMSNorm module that uses the optimized RMSNormFunction.
+
+    Args:
+        hidden_size (int): The size of the hidden dimension.
+        eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+        offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+        casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+        in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+    """
+
+
+    weight: torch.Tensor
+    variance_epsilon: float
+
+    def forward(self, hidden_states):
+        """
+        Apply RMS normalization to the input tensor.
+
+        Args:
+            hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+        Returns:
+            torch.Tensor: Normalized tensor of the same shape as input
+        """
+        return RMSNormFunction.apply(
+            hidden_states,
+            self.weight,
+            self.variance_epsilon,
+        )
+
+__all__ = ["RMSNorm"]
build/torch28-cxx11-xpu20251-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,4 @@
+{
+  "version": 1,
+  "python-depends": []
+}