Kernels
danieldk (HF Staff) committed
Commit 8776433 · verified · 1 Parent(s): 22aaf36

Build uploaded using `kernels`.
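A build layout like this is normally consumed through the `kernels` Python package rather than imported by path. A minimal loading sketch (the repository id below is a placeholder, not taken from this commit):

import torch
from kernels import get_kernel

# get_kernel fetches the build variant matching the local torch / CUDA /
# platform combination and imports it as a regular Python module.
layer_norm = get_kernel("kernels-community/layer-norm")  # placeholder repo id

print(layer_norm.dropout_add_ln_fwd)    # op wrappers from __init__.py
print(layer_norm.layers.LlamaRMSNorm)   # layer classes from layers.py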

Files changed (43)
  1. .gitattributes +7 -0
  2. build/torch210-cxx11-cu126-x86_64-linux/__init__.py +26 -0
  3. build/torch210-cxx11-cu126-x86_64-linux/_layer_norm_cuda_143103b.abi3.so +3 -0
  4. build/torch210-cxx11-cu126-x86_64-linux/_ops.py +9 -0
  5. build/torch210-cxx11-cu126-x86_64-linux/layer_norm/__init__.py +26 -0
  6. build/torch210-cxx11-cu126-x86_64-linux/layers.py +51 -0
  7. build/torch210-cxx11-cu126-x86_64-linux/metadata.json +13 -0
  8. build/torch210-cxx11-cu128-x86_64-linux/__init__.py +26 -0
  9. build/torch210-cxx11-cu128-x86_64-linux/_layer_norm_cuda_143103b.abi3.so +3 -0
  10. build/torch210-cxx11-cu128-x86_64-linux/_ops.py +9 -0
  11. build/torch210-cxx11-cu128-x86_64-linux/layer_norm/__init__.py +26 -0
  12. build/torch210-cxx11-cu128-x86_64-linux/layers.py +51 -0
  13. build/torch210-cxx11-cu128-x86_64-linux/metadata.json +15 -0
  14. build/torch210-cxx11-cu130-x86_64-linux/__init__.py +26 -0
  15. build/torch210-cxx11-cu130-x86_64-linux/_layer_norm_cuda_143103b.abi3.so +3 -0
  16. build/torch210-cxx11-cu130-x86_64-linux/_ops.py +9 -0
  17. build/torch210-cxx11-cu130-x86_64-linux/layer_norm/__init__.py +26 -0
  18. build/torch210-cxx11-cu130-x86_64-linux/layers.py +51 -0
  19. build/torch210-cxx11-cu130-x86_64-linux/metadata.json +15 -0
  20. build/torch211-cxx11-cu126-x86_64-linux/__init__.py +26 -0
  21. build/torch211-cxx11-cu126-x86_64-linux/_layer_norm_cuda_143103b.abi3.so +3 -0
  22. build/torch211-cxx11-cu126-x86_64-linux/_ops.py +9 -0
  23. build/torch211-cxx11-cu126-x86_64-linux/layer_norm/__init__.py +26 -0
  24. build/torch211-cxx11-cu126-x86_64-linux/layers.py +51 -0
  25. build/torch211-cxx11-cu126-x86_64-linux/metadata.json +13 -0
  26. build/torch211-cxx11-cu128-x86_64-linux/__init__.py +26 -0
  27. build/torch211-cxx11-cu128-x86_64-linux/_layer_norm_cuda_143103b.abi3.so +3 -0
  28. build/torch211-cxx11-cu128-x86_64-linux/_ops.py +9 -0
  29. build/torch211-cxx11-cu128-x86_64-linux/layer_norm/__init__.py +26 -0
  30. build/torch211-cxx11-cu128-x86_64-linux/layers.py +51 -0
  31. build/torch211-cxx11-cu128-x86_64-linux/metadata.json +15 -0
  32. build/torch211-cxx11-cu130-x86_64-linux/__init__.py +26 -0
  33. build/torch211-cxx11-cu130-x86_64-linux/_layer_norm_cuda_143103b.abi3.so +3 -0
  34. build/torch211-cxx11-cu130-x86_64-linux/_ops.py +9 -0
  35. build/torch211-cxx11-cu130-x86_64-linux/layer_norm/__init__.py +26 -0
  36. build/torch211-cxx11-cu130-x86_64-linux/layers.py +51 -0
  37. build/torch211-cxx11-cu130-x86_64-linux/metadata.json +15 -0
  38. build/torch29-cxx11-cu129-x86_64-linux/__init__.py +26 -0
  39. build/torch29-cxx11-cu129-x86_64-linux/_layer_norm_cuda_143103b.abi3.so +3 -0
  40. build/torch29-cxx11-cu129-x86_64-linux/_ops.py +9 -0
  41. build/torch29-cxx11-cu129-x86_64-linux/layer_norm/__init__.py +26 -0
  42. build/torch29-cxx11-cu129-x86_64-linux/layers.py +51 -0
  43. build/torch29-cxx11-cu129-x86_64-linux/metadata.json +15 -0
.gitattributes CHANGED
@@ -40,3 +40,10 @@ build/torch211-cxx11-cu126-aarch64-linux/_layer_norm_cuda_143103b.abi3.so filter
  build/torch211-cxx11-cu128-aarch64-linux/_layer_norm_cuda_143103b.abi3.so filter=lfs diff=lfs merge=lfs -text
  build/torch211-cxx11-cu130-aarch64-linux/_layer_norm_cuda_143103b.abi3.so filter=lfs diff=lfs merge=lfs -text
  build/torch29-cxx11-cu129-aarch64-linux/_layer_norm_cuda_143103b.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch210-cxx11-cu126-x86_64-linux/_layer_norm_cuda_143103b.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch210-cxx11-cu128-x86_64-linux/_layer_norm_cuda_143103b.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch210-cxx11-cu130-x86_64-linux/_layer_norm_cuda_143103b.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch211-cxx11-cu126-x86_64-linux/_layer_norm_cuda_143103b.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch211-cxx11-cu128-x86_64-linux/_layer_norm_cuda_143103b.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch211-cxx11-cu130-x86_64-linux/_layer_norm_cuda_143103b.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cu129-x86_64-linux/_layer_norm_cuda_143103b.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+ from . import layers
+
+ def dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm)
+
+ __all__ = [
+     "layers",
+     "dropout_add_ln_fwd",
+     "dropout_add_ln_bwd",
+     "dropout_add_ln_parallel_residual_fwd",
+     "dropout_add_ln_parallel_residual_bwd",
+ ]
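As a rough illustration of how the forward wrapper is called (mirroring the defaults used in `layers.py` below; the repo id, shapes, dtype and epsilon are placeholders, and the op returns a tuple whose first element is the normalized tensor):

import torch
from kernels import get_kernel

layer_norm = get_kernel("kernels-community/layer-norm")  # placeholder repo id

x = torch.randn(4, 128, 1024, device="cuda", dtype=torch.float16)
weight = torch.ones(1024, device="cuda", dtype=torch.float16)

out = layer_norm.dropout_add_ln_fwd(
    x.view(-1, x.shape[-1]),  # rows to normalize
    weight,                   # gamma
    None,                     # beta (no bias)
    None, None,               # rowscale, colscale
    None, None,               # x0_subset, z_subset
    0.0,                      # dropout_p
    1e-5,                     # epsilon
    1.0,                      # rowscale_const
    x.shape[1],               # z_numrows
    None,                     # gen
    False,                    # residual_in_fp32
    True,                     # is_rms_norm
)
y = out[0].view(x.shape)      # normalized output, same shape as the input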
build/torch210-cxx11-cu126-x86_64-linux/_layer_norm_cuda_143103b.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e466713afb0ca13d2c60a66eae845ddfcaf1ac98e5297d52068e54da831564c4
+ size 712093824
build/torch210-cxx11-cu126-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _layer_norm_cuda_143103b
+ ops = torch.ops._layer_norm_cuda_143103b
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_layer_norm_cuda_143103b::{op_name}"
build/torch210-cxx11-cu126-x86_64-linux/layer_norm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import importlib.util
+ import sys
+ from pathlib import Path
+ from types import ModuleType
+
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
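The hex-encoded path hash is what keeps several build variants from colliding in `sys.modules` when they are loaded side by side. A standalone illustration of the naming scheme (the paths below are placeholders):

import ctypes
from pathlib import Path

def module_name_for(path: Path) -> str:
    # Same scheme as _import_from_path: hex-encode the platform-sized hash of
    # the absolute path so each on-disk location gets its own module name.
    return "{:x}".format(ctypes.c_size_t(hash(path.absolute())).value)

print(module_name_for(Path("/tmp/variant-a/__init__.py")))
print(module_name_for(Path("/tmp/variant-b/__init__.py")))  # different name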
build/torch210-cxx11-cu126-x86_64-linux/layers.py ADDED
@@ -0,0 +1,51 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class LayerNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=False,
+         )
+         return output[0].view(hidden_states.shape)
+
+ class LlamaRMSNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=True,
+         )
+         return output[0].view(hidden_states.shape)
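These layer classes define no `__init__`; `weight` and `variance_epsilon` are expected to come from the module they stand in for (which is how the `kernels` layer mechanism typically uses them). A hand-wired smoke test might look like this (a sketch; shapes and epsilon are arbitrary and a CUDA device is required):

import torch

norm = LlamaRMSNorm()
norm.weight = torch.ones(1024, device="cuda", dtype=torch.float16)
norm.variance_epsilon = 1e-6

hidden = torch.randn(2, 64, 1024, device="cuda", dtype=torch.float16)
out = norm(hidden)  # runs the fused CUDA kernel in RMS-norm mode
assert out.shape == hidden.shape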
build/torch210-cxx11-cu126-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "version": 1,
+   "license": "BSD-3-Clause",
+   "python-depends": [],
+   "backend": {
+     "type": "cuda",
+     "archs": [
+       "8.0",
+       "8.9",
+       "9.0"
+     ]
+   }
+ }
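The `archs` list records the CUDA compute capabilities this binary was compiled for. A quick way to compare the local GPU against it (a sketch, not part of this repo; requires a CUDA device):

import torch

supported = {"8.0", "8.9", "9.0"}  # from the metadata.json above
major, minor = torch.cuda.get_device_capability()
cc = f"{major}.{minor}"
print(cc, "is in the arch list" if cc in supported else "is not in this build's arch list")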
build/torch210-cxx11-cu128-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+ from . import layers
+
+ def dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm)
+
+ __all__ = [
+     "layers",
+     "dropout_add_ln_fwd",
+     "dropout_add_ln_bwd",
+     "dropout_add_ln_parallel_residual_fwd",
+     "dropout_add_ln_parallel_residual_bwd",
+ ]
build/torch210-cxx11-cu128-x86_64-linux/_layer_norm_cuda_143103b.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ac7b943c6c28ccdfd408ef86ccc7ae5eb21adc6385f8e832141e0cbccd3eecd
+ size 1231419520
build/torch210-cxx11-cu128-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _layer_norm_cuda_143103b
+ ops = torch.ops._layer_norm_cuda_143103b
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_layer_norm_cuda_143103b::{op_name}"
build/torch210-cxx11-cu128-x86_64-linux/layer_norm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import importlib.util
+ import sys
+ from pathlib import Path
+ from types import ModuleType
+
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu128-x86_64-linux/layers.py ADDED
@@ -0,0 +1,51 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class LayerNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=False,
+         )
+         return output[0].view(hidden_states.shape)
+
+ class LlamaRMSNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=True,
+         )
+         return output[0].view(hidden_states.shape)
build/torch210-cxx11-cu128-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "version": 1,
+   "license": "BSD-3-Clause",
+   "python-depends": [],
+   "backend": {
+     "type": "cuda",
+     "archs": [
+       "10.0",
+       "12.0",
+       "8.0",
+       "8.9",
+       "9.0"
+     ]
+   }
+ }
build/torch210-cxx11-cu130-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+ from . import layers
+
+ def dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm)
+
+ __all__ = [
+     "layers",
+     "dropout_add_ln_fwd",
+     "dropout_add_ln_bwd",
+     "dropout_add_ln_parallel_residual_fwd",
+     "dropout_add_ln_parallel_residual_bwd",
+ ]
build/torch210-cxx11-cu130-x86_64-linux/_layer_norm_cuda_143103b.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3821d36be629ae880f15967c86f432bb1c26b4d1dfef430d7f905121eaf0781b
+ size 1238332560
build/torch210-cxx11-cu130-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _layer_norm_cuda_143103b
+ ops = torch.ops._layer_norm_cuda_143103b
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_layer_norm_cuda_143103b::{op_name}"
build/torch210-cxx11-cu130-x86_64-linux/layer_norm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import importlib.util
+ import sys
+ from pathlib import Path
+ from types import ModuleType
+
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu130-x86_64-linux/layers.py ADDED
@@ -0,0 +1,51 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class LayerNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=False,
+         )
+         return output[0].view(hidden_states.shape)
+
+ class LlamaRMSNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=True,
+         )
+         return output[0].view(hidden_states.shape)
build/torch210-cxx11-cu130-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "version": 1,
+   "license": "BSD-3-Clause",
+   "python-depends": [],
+   "backend": {
+     "type": "cuda",
+     "archs": [
+       "10.0",
+       "12.0",
+       "8.0",
+       "8.9",
+       "9.0"
+     ]
+   }
+ }
build/torch211-cxx11-cu126-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+ from . import layers
+
+ def dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm)
+
+ __all__ = [
+     "layers",
+     "dropout_add_ln_fwd",
+     "dropout_add_ln_bwd",
+     "dropout_add_ln_parallel_residual_fwd",
+     "dropout_add_ln_parallel_residual_bwd",
+ ]
build/torch211-cxx11-cu126-x86_64-linux/_layer_norm_cuda_143103b.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa655742dc940fbe87dc0d87a17bbd1dc6a4d7a5fb99ec4d3f0f16bbb875243d
+ size 712082776
build/torch211-cxx11-cu126-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _layer_norm_cuda_143103b
+ ops = torch.ops._layer_norm_cuda_143103b
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_layer_norm_cuda_143103b::{op_name}"
build/torch211-cxx11-cu126-x86_64-linux/layer_norm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import importlib.util
+ import sys
+ from pathlib import Path
+ from types import ModuleType
+
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu126-x86_64-linux/layers.py ADDED
@@ -0,0 +1,51 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class LayerNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=False,
+         )
+         return output[0].view(hidden_states.shape)
+
+ class LlamaRMSNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=True,
+         )
+         return output[0].view(hidden_states.shape)
build/torch211-cxx11-cu126-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "version": 1,
+   "license": "BSD-3-Clause",
+   "python-depends": [],
+   "backend": {
+     "type": "cuda",
+     "archs": [
+       "8.0",
+       "8.9",
+       "9.0"
+     ]
+   }
+ }
build/torch211-cxx11-cu128-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+ from . import layers
+
+ def dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm)
+
+ __all__ = [
+     "layers",
+     "dropout_add_ln_fwd",
+     "dropout_add_ln_bwd",
+     "dropout_add_ln_parallel_residual_fwd",
+     "dropout_add_ln_parallel_residual_bwd",
+ ]
build/torch211-cxx11-cu128-x86_64-linux/_layer_norm_cuda_143103b.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:047a71d1ae85841cc6d6a1a3fdd70aeda0f5f23ded37e74537d1f21d2f47d670
+ size 1231408464
build/torch211-cxx11-cu128-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _layer_norm_cuda_143103b
+ ops = torch.ops._layer_norm_cuda_143103b
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_layer_norm_cuda_143103b::{op_name}"
build/torch211-cxx11-cu128-x86_64-linux/layer_norm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import importlib.util
+ import sys
+ from pathlib import Path
+ from types import ModuleType
+
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu128-x86_64-linux/layers.py ADDED
@@ -0,0 +1,51 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class LayerNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=False,
+         )
+         return output[0].view(hidden_states.shape)
+
+ class LlamaRMSNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=True,
+         )
+         return output[0].view(hidden_states.shape)
build/torch211-cxx11-cu128-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "version": 1,
+   "license": "BSD-3-Clause",
+   "python-depends": [],
+   "backend": {
+     "type": "cuda",
+     "archs": [
+       "10.0",
+       "12.0",
+       "8.0",
+       "8.9",
+       "9.0"
+     ]
+   }
+ }
build/torch211-cxx11-cu130-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+ from . import layers
+
+ def dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm)
+
+ __all__ = [
+     "layers",
+     "dropout_add_ln_fwd",
+     "dropout_add_ln_bwd",
+     "dropout_add_ln_parallel_residual_fwd",
+     "dropout_add_ln_parallel_residual_bwd",
+ ]
build/torch211-cxx11-cu130-x86_64-linux/_layer_norm_cuda_143103b.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0eb22c7b8606869f2aaeb4e5163bfae654ab5a405b123a102cf6568fda508ca0
+ size 1238325592
build/torch211-cxx11-cu130-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _layer_norm_cuda_143103b
+ ops = torch.ops._layer_norm_cuda_143103b
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_layer_norm_cuda_143103b::{op_name}"
build/torch211-cxx11-cu130-x86_64-linux/layer_norm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import importlib.util
+ import sys
+ from pathlib import Path
+ from types import ModuleType
+
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu130-x86_64-linux/layers.py ADDED
@@ -0,0 +1,51 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class LayerNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=False,
+         )
+         return output[0].view(hidden_states.shape)
+
+ class LlamaRMSNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=True,
+         )
+         return output[0].view(hidden_states.shape)
build/torch211-cxx11-cu130-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "version": 1,
+   "license": "BSD-3-Clause",
+   "python-depends": [],
+   "backend": {
+     "type": "cuda",
+     "archs": [
+       "10.0",
+       "12.0",
+       "8.0",
+       "8.9",
+       "9.0"
+     ]
+   }
+ }
build/torch29-cxx11-cu129-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+ from . import layers
+
+ def dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm)
+
+ def dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm):
+     return ops.dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm)
+
+ __all__ = [
+     "layers",
+     "dropout_add_ln_fwd",
+     "dropout_add_ln_bwd",
+     "dropout_add_ln_parallel_residual_fwd",
+     "dropout_add_ln_parallel_residual_bwd",
+ ]
build/torch29-cxx11-cu129-x86_64-linux/_layer_norm_cuda_143103b.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d0e08047c46ba37ec87cdd6ebd77249d8e347737318de96cdc011bad18dd61a
+ size 1283022120
build/torch29-cxx11-cu129-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _layer_norm_cuda_143103b
+ ops = torch.ops._layer_norm_cuda_143103b
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_layer_norm_cuda_143103b::{op_name}"
build/torch29-cxx11-cu129-x86_64-linux/layer_norm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import importlib.util
+ import sys
+ from pathlib import Path
+ from types import ModuleType
+
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cu129-x86_64-linux/layers.py ADDED
@@ -0,0 +1,51 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class LayerNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=False,
+         )
+         return output[0].view(hidden_states.shape)
+
+ class LlamaRMSNorm(nn.Module):
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         output = ops.dropout_add_ln_fwd(
+             hidden_states.view(-1, hidden_states.shape[-1]),
+             gamma=self.weight,
+             beta=None,
+             rowscale=None,
+             colscale=None,
+             x0_subset=None,
+             z_subset=None,
+             dropout_p=0,
+             epsilon=self.variance_epsilon,
+             rowscale_const=1.0,
+             z_numrows=hidden_states.shape[1],
+             gen=None,
+             residual_in_fp32=False,
+             is_rms_norm=True,
+         )
+         return output[0].view(hidden_states.shape)
build/torch29-cxx11-cu129-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "version": 1,
+   "license": "BSD-3-Clause",
+   "python-depends": [],
+   "backend": {
+     "type": "cuda",
+     "archs": [
+       "10.0",
+       "12.0",
+       "8.0",
+       "8.9",
+       "9.0"
+     ]
+   }
+ }