diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..af64ad2273c761dad5e437d4753108cd6f121a8c --- /dev/null +++ b/.gitattributes @@ -0,0 +1,104 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_45fdbd5.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs 
merge=lfs -text +build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_490e187.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_9ad78ee.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cu128-x86_64-windows/_tinygrad_rms_cuda_6e9aef6.pyd filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs 
merge=lfs -text +build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch211-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch211-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch211-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu129-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch211-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch211-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch211-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text +build/torch29-cxx11-cu129-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so filter=lfs diff=lfs merge=lfs -text diff --git a/benchmarks/benchmark.py b/benchmarks/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..b1a27262954181de9ef4dc496b6d16d7738e3877 --- /dev/null +++ b/benchmarks/benchmark.py @@ -0,0 +1,47 @@ +import torch + +from kernels.benchmark import Benchmark + + +def rmsnorm_reference(x: torch.Tensor, eps: float) -> torch.Tensor: + rms = torch.sqrt(torch.mean(x**2, dim=-1, keepdim=True) + eps) + return x / rms + + +class TinygradRmsBenchmark(Benchmark): + seed: int = 42 + + def setup(self): + batch_size = 32 + seq_len = 512 + hidden_size = 1024 + self.eps = 1e-6 + + self.x = torch.randn( + batch_size, seq_len, hidden_size, device=self.device, dtype=torch.float32 + ) + self.out = torch.empty_like(self.x) + + def benchmark_base(self): + self.out = self.kernel.tinygrad_rms_norm_simple(self.x, self.eps) + + def verify_base(self) -> torch.Tensor: + return rmsnorm_reference(self.x, self.eps) + + def setup_large(self): + # Note: hidden_size must be 1024 (kernel constraint) + batch_size = 64 + seq_len = 1024 + hidden_size = 1024 + self.eps = 1e-6 + + self.x = torch.randn( + batch_size, seq_len, hidden_size, device=self.device, dtype=torch.float32 + ) + self.out = torch.empty_like(self.x) + + def benchmark_large(self): + self.out = self.kernel.tinygrad_rms_norm_simple(self.x, self.eps) + + def verify_large(self) -> torch.Tensor: + return rmsnorm_reference(self.x, self.eps)
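The benchmark above compares the kernel against the eager-mode rmsnorm_reference. A minimal standalone sketch of the same check, assuming the built extension is importable as tinygrad_rms (the import name and tolerances here are illustrative, not part of this diff):

import torch

import tinygrad_rms  # hypothetical import name for the built extension

x = torch.randn(32, 512, 1024, device="cuda", dtype=torch.float32)
eps = 1e-6

# Kernel output (simple interface) vs. the eager-mode reference above.
y = tinygrad_rms.tinygrad_rms_norm_simple(x, eps)
ref = x / torch.sqrt(torch.mean(x**2, dim=-1, keepdim=True) + eps)
torch.testing.assert_close(y, ref, rtol=1e-4, atol=1e-5)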
diff --git a/build/torch210-cu128-x86_64-windows/__init__.py b/build/torch210-cu128-x86_64-windows/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..74b63dfd90af2e1800191c944c85218bd6c5a57d --- /dev/null +++ b/build/torch210-cu128-x86_64-windows/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch210-cu128-x86_64-windows/_ops.py b/build/torch210-cu128-x86_64-windows/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..7e9f3f50550e2adde941162db174b353b4e9adb6 --- /dev/null +++ b/build/torch210-cu128-x86_64-windows/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6e9aef6 +ops = torch.ops._tinygrad_rms_cuda_6e9aef6 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace.
+ """ + return f"_tinygrad_rms_cuda_6e9aef6::{op_name}" diff --git a/build/torch210-cu128-x86_64-windows/_tinygrad_rms_cuda_6e9aef6.pyd b/build/torch210-cu128-x86_64-windows/_tinygrad_rms_cuda_6e9aef6.pyd new file mode 100644 index 0000000000000000000000000000000000000000..9fc01c80f6e20e4d283f6035ef99ed1adeb3eeda --- /dev/null +++ b/build/torch210-cu128-x86_64-windows/_tinygrad_rms_cuda_6e9aef6.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2015223a8a6040fa56544f699eab3c9f4723865f663aebf61fbd06d8fd250bea +size 320000 diff --git a/build/torch210-cu128-x86_64-windows/metadata.json b/build/torch210-cu128-x86_64-windows/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..0817130b54a94ab37b5481b9d75ae6347fe5b2b4 --- /dev/null +++ b/build/torch210-cu128-x86_64-windows/metadata.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "10.1", + "12.0+PTX", + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch210-cu128-x86_64-windows/tinygrad_rms/__init__.py b/build/torch210-cu128-x86_64-windows/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bc434ef44e63409acb52a8f3fff54a4adc46ed6a --- /dev/null +++ b/build/torch210-cu128-x86_64-windows/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import sys + +import importlib +from pathlib import Path +from types import ModuleType + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch210-cxx11-cu126-aarch64-linux/__init__.py b/build/torch210-cxx11-cu126-aarch64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch210-cxx11-cu126-aarch64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
diff --git a/build/torch210-cxx11-cu126-aarch64-linux/__init__.py b/build/torch210-cxx11-cu126-aarch64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch210-cxx11-cu126-aarch64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch210-cxx11-cu126-aarch64-linux/_ops.py b/build/torch210-cxx11-cu126-aarch64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch210-cxx11-cu126-aarch64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch210-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch210-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..a76d908951da5f137869fa5872d9e94d0d027d8a --- /dev/null +++ b/build/torch210-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a26e93d9a6daa5741ae1b976db32aadf2a9342f67211cc6ab1caa5348406e75 +size 2241168 diff --git a/build/torch210-cxx11-cu126-aarch64-linux/metadata.json b/build/torch210-cxx11-cu126-aarch64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..73f339fd6288c5c35226df154c70a8d9b0de7862 --- /dev/null +++ b/build/torch210-cxx11-cu126-aarch64-linux/metadata.json @@ -0,0 +1,18 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0+PTX" + ] + } +} diff --git a/build/torch210-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py b/build/torch210-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch210-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch210-cxx11-cu126-x86_64-linux/__init__.py b/build/torch210-cxx11-cu126-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch210-cxx11-cu126-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch210-cxx11-cu126-x86_64-linux/_ops.py b/build/torch210-cxx11-cu126-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch210-cxx11-cu126-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..18a50a692a6b0c1ffeb1a9ecb69c2a83d18e1141 --- /dev/null +++ b/build/torch210-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6dc5ab336157c3e6c577ecc8dca3790d4ac9273ae4b38ebd45e7dee0c6c20a5 +size 2123424 diff --git a/build/torch210-cxx11-cu126-x86_64-linux/metadata.json b/build/torch210-cxx11-cu126-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..73f339fd6288c5c35226df154c70a8d9b0de7862 --- /dev/null +++ b/build/torch210-cxx11-cu126-x86_64-linux/metadata.json @@ -0,0 +1,18 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0+PTX" + ] + } +} diff --git a/build/torch210-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py b/build/torch210-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch210-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch210-cxx11-cu128-aarch64-linux/__init__.py b/build/torch210-cxx11-cu128-aarch64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch210-cxx11-cu128-aarch64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch210-cxx11-cu128-aarch64-linux/_ops.py b/build/torch210-cxx11-cu128-aarch64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch210-cxx11-cu128-aarch64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch210-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch210-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..3e6ee421b2d12b71a1e854b7fa22ad803d9302bd --- /dev/null +++ b/build/torch210-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e05735521a2de0896d311bbfdda83b8d5c813e1498597b6613728a029f969665 +size 2372360 diff --git a/build/torch210-cxx11-cu128-aarch64-linux/metadata.json b/build/torch210-cxx11-cu128-aarch64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..1ad4ac762308faf8c4d2c94a66facdf7ef8e0de2 --- /dev/null +++ b/build/torch210-cxx11-cu128-aarch64-linux/metadata.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "10.1", + "12.0+PTX", + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch210-cxx11-cu128-aarch64-linux/tinygrad_rms/__init__.py b/build/torch210-cxx11-cu128-aarch64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch210-cxx11-cu128-aarch64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. 
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch210-cxx11-cu128-x86_64-linux/__init__.py b/build/torch210-cxx11-cu128-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch210-cxx11-cu128-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch210-cxx11-cu128-x86_64-linux/_ops.py b/build/torch210-cxx11-cu128-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch210-cxx11-cu128-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..7d1e77238d7d2b6303409c36268ffa26b03afd8a --- /dev/null +++ b/build/torch210-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:645b392a5deadd70a5405bbceeb2161c9b852cb0a303b826e8ee633ee4443883 +size 2244040 diff --git a/build/torch210-cxx11-cu128-x86_64-linux/metadata.json b/build/torch210-cxx11-cu128-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..1ad4ac762308faf8c4d2c94a66facdf7ef8e0de2 --- /dev/null +++ b/build/torch210-cxx11-cu128-x86_64-linux/metadata.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "10.1", + "12.0+PTX", + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch210-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py b/build/torch210-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch210-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch210-cxx11-cu130-aarch64-linux/__init__.py b/build/torch210-cxx11-cu130-aarch64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch210-cxx11-cu130-aarch64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch210-cxx11-cu130-aarch64-linux/_ops.py b/build/torch210-cxx11-cu130-aarch64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch210-cxx11-cu130-aarch64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch210-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch210-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..140bdf67f85a5e1415e55d1540347b6bd3e2d259 --- /dev/null +++ b/build/torch210-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dab93449e780276899a05ae1303d2a7c34c859ec5f596592246169999c6879b4 +size 2374320 diff --git a/build/torch210-cxx11-cu130-aarch64-linux/metadata.json b/build/torch210-cxx11-cu130-aarch64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..7cac0fd442ab13f9583e07ceecbd5afa7df72535 --- /dev/null +++ b/build/torch210-cxx11-cu130-aarch64-linux/metadata.json @@ -0,0 +1,19 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "11.0", + "12.0+PTX", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch210-cxx11-cu130-aarch64-linux/tinygrad_rms/__init__.py b/build/torch210-cxx11-cu130-aarch64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch210-cxx11-cu130-aarch64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. 
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch210-cxx11-cu130-x86_64-linux/__init__.py b/build/torch210-cxx11-cu130-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch210-cxx11-cu130-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch210-cxx11-cu130-x86_64-linux/_ops.py b/build/torch210-cxx11-cu130-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch210-cxx11-cu130-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..046c220f9a36755f873df6cec608398cb3c5569a --- /dev/null +++ b/build/torch210-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bd783d80e077f2983793e5fdc8a0f5478836851435a8b77f00b186fea64c8f4 +size 2245848 diff --git a/build/torch210-cxx11-cu130-x86_64-linux/metadata.json b/build/torch210-cxx11-cu130-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..7cac0fd442ab13f9583e07ceecbd5afa7df72535 --- /dev/null +++ b/build/torch210-cxx11-cu130-x86_64-linux/metadata.json @@ -0,0 +1,19 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "11.0", + "12.0+PTX", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch210-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py b/build/torch210-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch210-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch211-cxx11-cu126-aarch64-linux/__init__.py b/build/torch211-cxx11-cu126-aarch64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch211-cxx11-cu126-aarch64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch211-cxx11-cu126-aarch64-linux/_ops.py b/build/torch211-cxx11-cu126-aarch64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch211-cxx11-cu126-aarch64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch211-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch211-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..ea764004946c837126fd9905f4e7592e8780f372 --- /dev/null +++ b/build/torch211-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b0b368d677277876427af5179c177f36d614fc7787e258b582bca5ac653b332 +size 2237384 diff --git a/build/torch211-cxx11-cu126-aarch64-linux/metadata.json b/build/torch211-cxx11-cu126-aarch64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..73f339fd6288c5c35226df154c70a8d9b0de7862 --- /dev/null +++ b/build/torch211-cxx11-cu126-aarch64-linux/metadata.json @@ -0,0 +1,18 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0+PTX" + ] + } +} diff --git a/build/torch211-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py b/build/torch211-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch211-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. 
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch211-cxx11-cu126-x86_64-linux/__init__.py b/build/torch211-cxx11-cu126-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch211-cxx11-cu126-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch211-cxx11-cu126-x86_64-linux/_ops.py b/build/torch211-cxx11-cu126-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch211-cxx11-cu126-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch211-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch211-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..44e1fa46dcc80e18782319d67472807d0c13646b --- /dev/null +++ b/build/torch211-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6394aee38bd05dd68b98ab6e87ff95748c1662dad1b85178c475cd76de7cbf4 +size 2112256 diff --git a/build/torch211-cxx11-cu126-x86_64-linux/metadata.json b/build/torch211-cxx11-cu126-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..73f339fd6288c5c35226df154c70a8d9b0de7862 --- /dev/null +++ b/build/torch211-cxx11-cu126-x86_64-linux/metadata.json @@ -0,0 +1,18 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0+PTX" + ] + } +} diff --git a/build/torch211-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py b/build/torch211-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch211-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch211-cxx11-cu128-aarch64-linux/__init__.py b/build/torch211-cxx11-cu128-aarch64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch211-cxx11-cu128-aarch64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch211-cxx11-cu128-aarch64-linux/_ops.py b/build/torch211-cxx11-cu128-aarch64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch211-cxx11-cu128-aarch64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch211-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch211-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..0cdb2bd286b82ed1af84f4ec987cf1868f6f12de --- /dev/null +++ b/build/torch211-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7a77fa708df07fe1cd9181db9b445dd9a2535bef9968d170c437868f6cca5a3 +size 2368568 diff --git a/build/torch211-cxx11-cu128-aarch64-linux/metadata.json b/build/torch211-cxx11-cu128-aarch64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..1ad4ac762308faf8c4d2c94a66facdf7ef8e0de2 --- /dev/null +++ b/build/torch211-cxx11-cu128-aarch64-linux/metadata.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "10.1", + "12.0+PTX", + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch211-cxx11-cu128-aarch64-linux/tinygrad_rms/__init__.py b/build/torch211-cxx11-cu128-aarch64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch211-cxx11-cu128-aarch64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. 
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch211-cxx11-cu128-x86_64-linux/__init__.py b/build/torch211-cxx11-cu128-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch211-cxx11-cu128-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch211-cxx11-cu128-x86_64-linux/_ops.py b/build/torch211-cxx11-cu128-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch211-cxx11-cu128-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch211-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch211-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..4fe74b9aee73a2dee1bdfcd58a8f6a30fc822978 --- /dev/null +++ b/build/torch211-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2528a713f5fddd2d8ec0ab7dad1b08e59a693bcc5ea3d4c060a6c25760bc4956 +size 2232880 diff --git a/build/torch211-cxx11-cu128-x86_64-linux/metadata.json b/build/torch211-cxx11-cu128-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..1ad4ac762308faf8c4d2c94a66facdf7ef8e0de2 --- /dev/null +++ b/build/torch211-cxx11-cu128-x86_64-linux/metadata.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "10.1", + "12.0+PTX", + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch211-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py b/build/torch211-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch211-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch211-cxx11-cu130-aarch64-linux/__init__.py b/build/torch211-cxx11-cu130-aarch64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch211-cxx11-cu130-aarch64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch211-cxx11-cu130-aarch64-linux/_ops.py b/build/torch211-cxx11-cu130-aarch64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch211-cxx11-cu130-aarch64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch211-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch211-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..da86c89438d18c74d427ec589df7e4fac5008400 --- /dev/null +++ b/build/torch211-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:060f7948b347b77b586d70b4f9d20bdf4d630825c295bdf606d94bac041a1a7f +size 2370528 diff --git a/build/torch211-cxx11-cu130-aarch64-linux/metadata.json b/build/torch211-cxx11-cu130-aarch64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..7cac0fd442ab13f9583e07ceecbd5afa7df72535 --- /dev/null +++ b/build/torch211-cxx11-cu130-aarch64-linux/metadata.json @@ -0,0 +1,19 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "11.0", + "12.0+PTX", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch211-cxx11-cu130-aarch64-linux/tinygrad_rms/__init__.py b/build/torch211-cxx11-cu130-aarch64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch211-cxx11-cu130-aarch64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. 
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch211-cxx11-cu130-x86_64-linux/__init__.py b/build/torch211-cxx11-cu130-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch211-cxx11-cu130-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch211-cxx11-cu130-x86_64-linux/_ops.py b/build/torch211-cxx11-cu130-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch211-cxx11-cu130-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch211-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch211-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..c88352710670880b76972492e9c1e6002a92a062 --- /dev/null +++ b/build/torch211-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1987caa31a62d13612871c2554f23facb2727ff58e5d274fc98ec39a105d24aa +size 2234688 diff --git a/build/torch211-cxx11-cu130-x86_64-linux/metadata.json b/build/torch211-cxx11-cu130-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..7cac0fd442ab13f9583e07ceecbd5afa7df72535 --- /dev/null +++ b/build/torch211-cxx11-cu130-x86_64-linux/metadata.json @@ -0,0 +1,19 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "11.0", + "12.0+PTX", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch211-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py b/build/torch211-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch211-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch28-cxx11-cu126-x86_64-linux/__init__.py b/build/torch28-cxx11-cu126-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch28-cxx11-cu126-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch28-cxx11-cu126-x86_64-linux/_ops.py b/build/torch28-cxx11-cu126-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84041c1e508c386779e5b1180abdc300d996193c --- /dev/null +++ b/build/torch28-cxx11-cu126-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_eb79f2b +ops = torch.ops._tinygrad_rms_eb79f2b + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_eb79f2b::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so b/build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..6dc7325a82e70695fe03c8ea1a04bc54b4ce5a48 --- /dev/null +++ b/build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e26fe31730049e6cb5c4af6d3e8f1f7bd8e25015bd01bfe60dab84bc9f21bbf +size 2116936 diff --git a/build/torch28-cxx11-cu126-x86_64-linux/metadata.json b/build/torch28-cxx11-cu126-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..9cf5deed9898dce769f4cc73913d3530b92a0bd8 --- /dev/null +++ b/build/torch28-cxx11-cu126-x86_64-linux/metadata.json @@ -0,0 +1,4 @@ +{ + "version": 1, + "python-depends": [] +} \ No newline at end of file diff --git a/build/torch28-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py b/build/torch28-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309 --- /dev/null +++ b/build/torch28-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import sys + +import importlib.util +from pathlib import Path +from types import ModuleType + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. 
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch28-cxx11-cu128-x86_64-linux/__init__.py b/build/torch28-cxx11-cu128-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch28-cxx11-cu128-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch28-cxx11-cu128-x86_64-linux/_ops.py b/build/torch28-cxx11-cu128-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84041c1e508c386779e5b1180abdc300d996193c --- /dev/null +++ b/build/torch28-cxx11-cu128-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_eb79f2b +ops = torch.ops._tinygrad_rms_eb79f2b + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_tinygrad_rms_eb79f2b::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so b/build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..a6f5b99d6fb945562a9ccb2eb2dbaeeeff156018 --- /dev/null +++ b/build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20b810df43540619f562634b185e164ee0bb2498f1cc1a07eaa3a739f943659e +size 2229096 diff --git a/build/torch28-cxx11-cu128-x86_64-linux/metadata.json b/build/torch28-cxx11-cu128-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..9cf5deed9898dce769f4cc73913d3530b92a0bd8 --- /dev/null +++ b/build/torch28-cxx11-cu128-x86_64-linux/metadata.json @@ -0,0 +1,4 @@ +{ + "version": 1, + "python-depends": [] +} \ No newline at end of file diff --git a/build/torch28-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py b/build/torch28-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309 --- /dev/null +++ b/build/torch28-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import sys + +import importlib.util +from pathlib import Path +from types import ModuleType + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch28-cxx11-cu129-x86_64-linux/__init__.py b/build/torch28-cxx11-cu129-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch28-cxx11-cu129-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch28-cxx11-cu129-x86_64-linux/_ops.py b/build/torch28-cxx11-cu129-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84041c1e508c386779e5b1180abdc300d996193c --- /dev/null +++ b/build/torch28-cxx11-cu129-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_eb79f2b +ops = torch.ops._tinygrad_rms_eb79f2b + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_eb79f2b::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so b/build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..f76ad8d999ab2117014e1f83af00f4258dbd3ae8 --- /dev/null +++ b/build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_eb79f2b.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91d8e5e0d5cc8d7a1557b4b62eca0a99e1795b08d69f2a8ca6b19569f058e51d +size 2262880 diff --git a/build/torch28-cxx11-cu129-x86_64-linux/metadata.json b/build/torch28-cxx11-cu129-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..9cf5deed9898dce769f4cc73913d3530b92a0bd8 --- /dev/null +++ b/build/torch28-cxx11-cu129-x86_64-linux/metadata.json @@ -0,0 +1,4 @@ +{ + "version": 1, + "python-depends": [] +} \ No newline at end of file diff --git a/build/torch28-cxx11-cu129-x86_64-linux/tinygrad_rms/__init__.py b/build/torch28-cxx11-cu129-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309 --- /dev/null +++ b/build/torch28-cxx11-cu129-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import sys + +import importlib.util +from pathlib import Path +from types import ModuleType + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. 
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch29-cxx11-cu126-aarch64-linux/__init__.py b/build/torch29-cxx11-cu126-aarch64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch29-cxx11-cu126-aarch64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch29-cxx11-cu126-aarch64-linux/_ops.py b/build/torch29-cxx11-cu126-aarch64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a2989d8e40da6e34b8d44625d04317835c9cb947 --- /dev/null +++ b/build/torch29-cxx11-cu126-aarch64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_9a0d032 +ops = torch.ops._tinygrad_rms_cuda_9a0d032 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_tinygrad_rms_cuda_9a0d032::{op_name}" diff --git a/build/torch29-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so b/build/torch29-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..e90214a328042c7905f447051075a4b3bee57d44 --- /dev/null +++ b/build/torch29-cxx11-cu126-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a7c9e663b5b1611f5e64309b6f3e7377c545b564a21b81ff0cf35bb0e4cea23 +size 2238056 diff --git a/build/torch29-cxx11-cu126-aarch64-linux/metadata.json b/build/torch29-cxx11-cu126-aarch64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..73f339fd6288c5c35226df154c70a8d9b0de7862 --- /dev/null +++ b/build/torch29-cxx11-cu126-aarch64-linux/metadata.json @@ -0,0 +1,18 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0+PTX" + ] + } +} diff --git a/build/torch29-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py b/build/torch29-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309 --- /dev/null +++ b/build/torch29-cxx11-cu126-aarch64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import sys + +import importlib.util +from pathlib import Path +from types import ModuleType + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch29-cxx11-cu126-x86_64-linux/__init__.py b/build/torch29-cxx11-cu126-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch29-cxx11-cu126-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch29-cxx11-cu126-x86_64-linux/_ops.py b/build/torch29-cxx11-cu126-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a2989d8e40da6e34b8d44625d04317835c9cb947 --- /dev/null +++ b/build/torch29-cxx11-cu126-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_9a0d032 +ops = torch.ops._tinygrad_rms_cuda_9a0d032 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_cuda_9a0d032::{op_name}" diff --git a/build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so b/build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..c608a9d26ec13be17e583160cbf6333185454d43 --- /dev/null +++ b/build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3477281860ebd1d3c694e12bfef35c8597938e1411a379200cd92bc7e36aa276 +size 2116928 diff --git a/build/torch29-cxx11-cu126-x86_64-linux/metadata.json b/build/torch29-cxx11-cu126-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..73f339fd6288c5c35226df154c70a8d9b0de7862 --- /dev/null +++ b/build/torch29-cxx11-cu126-x86_64-linux/metadata.json @@ -0,0 +1,18 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0+PTX" + ] + } +} diff --git a/build/torch29-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py b/build/torch29-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309 --- /dev/null +++ b/build/torch29-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import sys + +import importlib.util +from pathlib import Path +from types import ModuleType + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. 
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch29-cxx11-cu128-aarch64-linux/__init__.py b/build/torch29-cxx11-cu128-aarch64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch29-cxx11-cu128-aarch64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch29-cxx11-cu128-aarch64-linux/_ops.py b/build/torch29-cxx11-cu128-aarch64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a2989d8e40da6e34b8d44625d04317835c9cb947 --- /dev/null +++ b/build/torch29-cxx11-cu128-aarch64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_9a0d032 +ops = torch.ops._tinygrad_rms_cuda_9a0d032 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_tinygrad_rms_cuda_9a0d032::{op_name}" diff --git a/build/torch29-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so b/build/torch29-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..69fe43e6e5fd203952e444ff2195553b0425ade4 --- /dev/null +++ b/build/torch29-cxx11-cu128-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dfaae92bc75336a18d6abff1e63e546529173730d24db38167c6915d1dc4bac +size 2368920 diff --git a/build/torch29-cxx11-cu128-aarch64-linux/metadata.json b/build/torch29-cxx11-cu128-aarch64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..1ad4ac762308faf8c4d2c94a66facdf7ef8e0de2 --- /dev/null +++ b/build/torch29-cxx11-cu128-aarch64-linux/metadata.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "10.1", + "12.0+PTX", + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch29-cxx11-cu128-aarch64-linux/tinygrad_rms/__init__.py b/build/torch29-cxx11-cu128-aarch64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309 --- /dev/null +++ b/build/torch29-cxx11-cu128-aarch64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import sys + +import importlib.util +from pathlib import Path +from types import ModuleType + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch29-cxx11-cu128-x86_64-linux/__init__.py b/build/torch29-cxx11-cu128-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch29-cxx11-cu128-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch29-cxx11-cu128-x86_64-linux/_ops.py b/build/torch29-cxx11-cu128-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a2989d8e40da6e34b8d44625d04317835c9cb947 --- /dev/null +++ b/build/torch29-cxx11-cu128-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_9a0d032 +ops = torch.ops._tinygrad_rms_cuda_9a0d032 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_cuda_9a0d032::{op_name}" diff --git a/build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so b/build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..c7aa6cb52a63d63af45208f300dd24efac4e5dad --- /dev/null +++ b/build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3e5da37f28ae196d5c1e8dddbf8b07bfd70016dcd4f16bf015ebfa4939e49d2 +size 2233176 diff --git a/build/torch29-cxx11-cu128-x86_64-linux/metadata.json b/build/torch29-cxx11-cu128-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..1ad4ac762308faf8c4d2c94a66facdf7ef8e0de2 --- /dev/null +++ b/build/torch29-cxx11-cu128-x86_64-linux/metadata.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "10.1", + "12.0+PTX", + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch29-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py b/build/torch29-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309 --- /dev/null +++ b/build/torch29-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import sys + +import importlib.util +from pathlib import Path +from types import ModuleType + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. 
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch29-cxx11-cu129-aarch64-linux/__init__.py b/build/torch29-cxx11-cu129-aarch64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch29-cxx11-cu129-aarch64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch29-cxx11-cu129-aarch64-linux/_ops.py b/build/torch29-cxx11-cu129-aarch64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch29-cxx11-cu129-aarch64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch29-cxx11-cu129-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch29-cxx11-cu129-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..b2a39c35f79474280b464a8c6008dd36bf61349e --- /dev/null +++ b/build/torch29-cxx11-cu129-aarch64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1649f0273c62407b649546ea3483f0ea2b858425c4b21f9306375c02c1cd6047 +size 2370344 diff --git a/build/torch29-cxx11-cu129-aarch64-linux/metadata.json b/build/torch29-cxx11-cu129-aarch64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..1ad4ac762308faf8c4d2c94a66facdf7ef8e0de2 --- /dev/null +++ b/build/torch29-cxx11-cu129-aarch64-linux/metadata.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "10.1", + "12.0+PTX", + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch29-cxx11-cu129-aarch64-linux/tinygrad_rms/__init__.py b/build/torch29-cxx11-cu129-aarch64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch29-cxx11-cu129-aarch64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch29-cxx11-cu129-x86_64-linux/__init__.py b/build/torch29-cxx11-cu129-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch29-cxx11-cu129-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch29-cxx11-cu129-x86_64-linux/_ops.py b/build/torch29-cxx11-cu129-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..84d6a32f0c96d336b10184083c69864cccc0acdf --- /dev/null +++ b/build/torch29-cxx11-cu129-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_6eb00dc +ops = torch.ops._tinygrad_rms_cuda_6eb00dc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_cuda_6eb00dc::{op_name}" diff --git a/build/torch29-cxx11-cu129-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so b/build/torch29-cxx11-cu129-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..e97e62e16830494a5d9d1be1c269ecce085e108e --- /dev/null +++ b/build/torch29-cxx11-cu129-x86_64-linux/_tinygrad_rms_cuda_6eb00dc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7bdd7d0c61c7df6af0a414d49acfc4d61c5c23fc4bc8e9b0d9a5e633ac6a88b +size 2262864 diff --git a/build/torch29-cxx11-cu129-x86_64-linux/metadata.json b/build/torch29-cxx11-cu129-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..1ad4ac762308faf8c4d2c94a66facdf7ef8e0de2 --- /dev/null +++ b/build/torch29-cxx11-cu129-x86_64-linux/metadata.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "10.1", + "12.0+PTX", + "7.0", + "7.2", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch29-cxx11-cu129-x86_64-linux/tinygrad_rms/__init__.py b/build/torch29-cxx11-cu129-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23 --- /dev/null +++ b/build/torch29-cxx11-cu129-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import importlib.util +import sys +from pathlib import Path +from types import ModuleType + + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. 
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch29-cxx11-cu130-aarch64-linux/__init__.py b/build/torch29-cxx11-cu130-aarch64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch29-cxx11-cu130-aarch64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch29-cxx11-cu130-aarch64-linux/_ops.py b/build/torch29-cxx11-cu130-aarch64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a2989d8e40da6e34b8d44625d04317835c9cb947 --- /dev/null +++ b/build/torch29-cxx11-cu130-aarch64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_9a0d032 +ops = torch.ops._tinygrad_rms_cuda_9a0d032 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_tinygrad_rms_cuda_9a0d032::{op_name}" diff --git a/build/torch29-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so b/build/torch29-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..9a77a6a2263a1ac6e56fc5f5898a8d0c3600db9f --- /dev/null +++ b/build/torch29-cxx11-cu130-aarch64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af049d5728ec5b6ab0f062c9ab0c3b37e16a2376f484b8ec47fa0ebc502aab3c +size 2370880 diff --git a/build/torch29-cxx11-cu130-aarch64-linux/metadata.json b/build/torch29-cxx11-cu130-aarch64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..7cac0fd442ab13f9583e07ceecbd5afa7df72535 --- /dev/null +++ b/build/torch29-cxx11-cu130-aarch64-linux/metadata.json @@ -0,0 +1,19 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "11.0", + "12.0+PTX", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch29-cxx11-cu130-aarch64-linux/tinygrad_rms/__init__.py b/build/torch29-cxx11-cu130-aarch64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309 --- /dev/null +++ b/build/torch29-cxx11-cu130-aarch64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import sys + +import importlib +from pathlib import Path +from types import ModuleType + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. + path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py"))) diff --git a/build/torch29-cxx11-cu130-x86_64-linux/__init__.py b/build/torch29-cxx11-cu130-x86_64-linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6350bb09a2d23a877d737f90d8cd4124f7f66ea --- /dev/null +++ b/build/torch29-cxx11-cu130-x86_64-linux/__init__.py @@ -0,0 +1,63 @@ +from typing import Optional, Tuple + +import torch + +from ._ops import ops + + +def tinygrad_rms_norm( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon)) + + This implementation uses a two-kernel approach: + 1. Compute 1/sqrt(mean(x^2) + epsilon) for each row + 2. 
Multiply input by the computed factor + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Tuple of (output tensor, rms_inv tensor) + """ + if out is None: + out = torch.empty_like(x) + + hidden_size = x.size(-1) + num_rows = x.numel() // hidden_size + rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device) + + ops.tinygrad_rms_norm(out, rms_inv, x, epsilon) + return out, rms_inv + + +def tinygrad_rms_norm_simple( + x: torch.Tensor, + epsilon: float = 1e-6, + out: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Compute RMSNorm using tinygrad-style CUDA kernels. + + This is a simpler interface that only returns the normalized output. + + Args: + x: Input tensor of shape (..., hidden_size) + epsilon: Small constant for numerical stability + out: Optional pre-allocated output tensor + + Returns: + Normalized output tensor + """ + if out is None: + out = torch.empty_like(x) + + ops.tinygrad_rms_norm_inplace(out, x, epsilon) + return out diff --git a/build/torch29-cxx11-cu130-x86_64-linux/_ops.py b/build/torch29-cxx11-cu130-x86_64-linux/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a2989d8e40da6e34b8d44625d04317835c9cb947 --- /dev/null +++ b/build/torch29-cxx11-cu130-x86_64-linux/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _tinygrad_rms_cuda_9a0d032 +ops = torch.ops._tinygrad_rms_cuda_9a0d032 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_tinygrad_rms_cuda_9a0d032::{op_name}" diff --git a/build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so b/build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..da73166e9512770b62723c38fd0b37db1af9d645 --- /dev/null +++ b/build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_cuda_9a0d032.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccfa477d1fd13da0ae451bc359cf7d9d0de5ea9ad679a87b248d2d5b181ca379 +size 2234880 diff --git a/build/torch29-cxx11-cu130-x86_64-linux/metadata.json b/build/torch29-cxx11-cu130-x86_64-linux/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..7cac0fd442ab13f9583e07ceecbd5afa7df72535 --- /dev/null +++ b/build/torch29-cxx11-cu130-x86_64-linux/metadata.json @@ -0,0 +1,19 @@ +{ + "version": 1, + "license": "MIT", + "python-depends": [], + "backend": { + "type": "cuda", + "archs": [ + "10.0", + "11.0", + "12.0+PTX", + "7.5", + "8.0", + "8.6", + "8.7", + "8.9", + "9.0" + ] + } +} diff --git a/build/torch29-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py b/build/torch29-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309 --- /dev/null +++ b/build/torch29-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py @@ -0,0 +1,26 @@ +import ctypes +import sys + +import importlib +from pathlib import Path +from types import ModuleType + +def _import_from_path(file_path: Path) -> ModuleType: + # We cannot use the module name as-is, after adding it to `sys.modules`, + # it would also be used for other imports. So, we make a module name that + # depends on the path for it to be unique using the hex-encoded hash of + # the path. 
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value) + module_name = path_hash + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None: + raise ImportError(f"Cannot load spec for {module_name} from {file_path}") + module = importlib.util.module_from_spec(spec) + if module is None: + raise ImportError(f"Cannot load module {module_name} from spec") + sys.modules[module_name] = module + spec.loader.exec_module(module) # type: ignore + return module + + +globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
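For reference, a minimal usage sketch of the Python API added in the `__init__.py` files above. It assumes one of the built variants is importable as `tinygrad_rms` and that a CUDA device is available; the pure-PyTorch reference computation is for illustration only and is not part of this diff.

    # Usage sketch (assumed import name `tinygrad_rms`; requires a CUDA device).
    import torch
    import tinygrad_rms

    x = torch.randn(4, 128, 4096, device="cuda")

    # Two-kernel interface: returns the output and the per-row
    # 1/sqrt(mean(x^2) + epsilon) factors.
    out, rms_inv = tinygrad_rms.tinygrad_rms_norm(x, epsilon=1e-6)

    # Simple interface: output only.
    out_simple = tinygrad_rms.tinygrad_rms_norm_simple(x, epsilon=1e-6)

    # Pure-PyTorch reference (note: this kernel applies no learned weight).
    ref = x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + 1e-6)
    torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)
    torch.testing.assert_close(out_simple, ref, rtol=1e-4, atol=1e-4)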
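Each build directory in this diff follows a torch{MAJOR}{MINOR}-cxx11-cu{CUDA}-{machine}-linux naming scheme, so a loader can derive the matching variant from the running environment. A minimal, Linux-only sketch of that mapping; the helper name `build_variant` is hypothetical and not part of this diff.

    # Hypothetical helper mapping the runtime environment onto one of the
    # build/<variant>/ directories above (assumes a CUDA build of torch).
    import platform
    import torch

    def build_variant() -> str:
        torch_ver = "".join(torch.__version__.split("+")[0].split(".")[:2])  # "2.9.0" -> "29"
        cuda_ver = torch.version.cuda.replace(".", "")  # "12.9" -> "129"
        machine = platform.machine()  # "x86_64" or "aarch64"
        return f"torch{torch_ver}-cxx11-cu{cuda_ver}-{machine}-linux"

    print(build_variant())  # e.g. "torch29-cxx11-cu129-x86_64-linux"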
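The path-hashed module name in the `tinygrad_rms/__init__.py` shims exists because every build variant ships a file with the same name; a fixed key would let a second variant clobber the first in `sys.modules`. A self-contained illustration of the naming trick, with hypothetical paths:

    # Why the shim hashes the file path into the module name: a *fixed* name
    # would be overwritten in sys.modules when a second build variant loads.
    import ctypes
    from pathlib import Path

    def unique_name(file_path: Path) -> str:
        # Hex-encoded, word-sized hash of the absolute path, as in the shim.
        return "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)

    a = unique_name(Path("build/torch29-cxx11-cu129-x86_64-linux/__init__.py"))
    b = unique_name(Path("build/torch29-cxx11-cu130-x86_64-linux/__init__.py"))
    assert a != b  # distinct sys.modules keys, so both variants can coexist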