Kernels
danieldk (HF Staff) committed
Commit 4d0e03b (verified)
1 parent: 74459d2

Build uploaded using `kernels`.
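For context, builds uploaded this way are typically consumed through the `kernels` Python package, which selects the build directory matching the local Torch/CUDA/ABI combination. A minimal, hypothetical usage sketch (the repository id `kernels-community/activation` is an assumption, not taken from this page):

```python
# Hypothetical sketch: load the pre-built activation kernel with `kernels`.
# Assumes a CUDA GPU and that the repo id below is where this build lives.
import torch
from kernels import get_kernel

activation = get_kernel("kernels-community/activation")  # assumed repo id

x = torch.randn(8, 128, device="cuda", dtype=torch.float16)
out = torch.empty(8, 64, device="cuda", dtype=torch.float16)
activation.silu_and_mul(out, x)  # writes silu(x[..., :64]) * x[..., 64:] into `out`
```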

Files changed (36)
  1. build/torch210-cxx11-cu126-x86_64-linux/{_activation_63b875f.abi3.so → _activation_cuda_5e1630d.abi3.so} +2 -2
  2. build/torch210-cxx11-cu126-x86_64-linux/_ops.py +3 -3
  3. build/torch210-cxx11-cu126-x86_64-linux/activation/__init__.py +2 -2
  4. build/torch210-cxx11-cu126-x86_64-linux/metadata.json +16 -2
  5. build/torch210-cxx11-cu128-x86_64-linux/{_activation_63b875f.abi3.so → _activation_cuda_5e1630d.abi3.so} +2 -2
  6. build/torch210-cxx11-cu128-x86_64-linux/_ops.py +3 -3
  7. build/torch210-cxx11-cu128-x86_64-linux/activation/__init__.py +2 -2
  8. build/torch210-cxx11-cu128-x86_64-linux/metadata.json +19 -2
  9. build/torch210-cxx11-cu130-x86_64-linux/{_activation_63b875f.abi3.so → _activation_cuda_5e1630d.abi3.so} +2 -2
  10. build/torch210-cxx11-cu130-x86_64-linux/_ops.py +3 -3
  11. build/torch210-cxx11-cu130-x86_64-linux/activation/__init__.py +2 -2
  12. build/torch210-cxx11-cu130-x86_64-linux/metadata.json +17 -2
  13. build/torch211-cxx11-cu126-x86_64-linux/__init__.py +75 -0
  14. build/torch211-cxx11-cu126-x86_64-linux/_activation_cuda_5e1630d.abi3.so +3 -0
  15. build/torch211-cxx11-cu126-x86_64-linux/_ops.py +9 -0
  16. build/torch211-cxx11-cu126-x86_64-linux/activation/__init__.py +26 -0
  17. build/torch211-cxx11-cu126-x86_64-linux/layers.py +201 -0
  18. build/torch211-cxx11-cu126-x86_64-linux/metadata.json +18 -0
  19. build/torch211-cxx11-cu128-x86_64-linux/__init__.py +75 -0
  20. build/torch211-cxx11-cu128-x86_64-linux/_activation_cuda_5e1630d.abi3.so +3 -0
  21. build/torch211-cxx11-cu128-x86_64-linux/_ops.py +9 -0
  22. build/torch211-cxx11-cu128-x86_64-linux/activation/__init__.py +26 -0
  23. build/torch211-cxx11-cu128-x86_64-linux/layers.py +201 -0
  24. build/torch211-cxx11-cu128-x86_64-linux/metadata.json +21 -0
  25. build/torch211-cxx11-cu130-x86_64-linux/__init__.py +75 -0
  26. build/torch211-cxx11-cu130-x86_64-linux/_activation_cuda_5e1630d.abi3.so +3 -0
  27. build/torch211-cxx11-cu130-x86_64-linux/_ops.py +9 -0
  28. build/torch211-cxx11-cu130-x86_64-linux/activation/__init__.py +26 -0
  29. build/torch211-cxx11-cu130-x86_64-linux/layers.py +201 -0
  30. build/torch211-cxx11-cu130-x86_64-linux/metadata.json +19 -0
  31. build/torch29-cxx11-cu129-x86_64-linux/__init__.py +75 -0
  32. build/torch29-cxx11-cu129-x86_64-linux/_activation_cuda_5e1630d.abi3.so +3 -0
  33. build/torch29-cxx11-cu129-x86_64-linux/_ops.py +9 -0
  34. build/torch29-cxx11-cu129-x86_64-linux/activation/__init__.py +26 -0
  35. build/torch29-cxx11-cu129-x86_64-linux/layers.py +201 -0
  36. build/torch29-cxx11-cu129-x86_64-linux/metadata.json +21 -0
build/torch210-cxx11-cu126-x86_64-linux/{_activation_63b875f.abi3.so → _activation_cuda_5e1630d.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5aaa5c4a7e3fe6967d10f5bc25c899507a57f2fd941c9aff02bd9ded610d9542
-size 3126824
+oid sha256:f8a0be3112850b924da942b3913629a7ab0681277b29b23e34bfd79e24d16b2f
+size 3126848
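The shared objects are tracked with Git LFS, so the diff above only changes the pointer file (object hash and byte size). A small sketch, assuming the object has been pulled locally to a hypothetical path, for checking a downloaded blob against its pointer:

```python
# Sketch: compare a downloaded LFS object with the oid/size recorded in its pointer.
# The local filename is hypothetical.
import hashlib
from pathlib import Path

blob = Path("_activation_cuda_5e1630d.abi3.so").read_bytes()
print("sha256:", hashlib.sha256(blob).hexdigest())  # should match the pointer's oid
print("size:", len(blob))                           # should match the pointer's size
```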
build/torch210-cxx11-cu126-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
+from . import _activation_cuda_5e1630d
+ops = torch.ops._activation_cuda_5e1630d
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_63b875f::{op_name}"
+    return f"_activation_cuda_5e1630d::{op_name}"
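The rename reflects the new build hash in the extension's op namespace: operators are exposed under `torch.ops._activation_cuda_5e1630d`, and `add_op_namespace_prefix` produces the fully qualified `namespace::op` names that `torch.library`-style APIs expect. A standalone sketch of the convention (mirroring the helper rather than importing the built extension):

```python
# Standalone sketch of the naming convention used by _ops.py.
def add_op_namespace_prefix(op_name: str) -> str:
    # Fully qualified name: "<extension namespace>::<op name>"
    return f"_activation_cuda_5e1630d::{op_name}"

print(add_op_namespace_prefix("silu_and_mul"))
# _activation_cuda_5e1630d::silu_and_mul
```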
build/torch210-cxx11-cu126-x86_64-linux/activation/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
build/torch210-cxx11-cu126-x86_64-linux/metadata.json CHANGED
@@ -1,4 +1,18 @@
 {
   "version": 1,
-  "python-depends": []
-}
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
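The expanded metadata now records the license and the CUDA backend with the compute capabilities the binary was compiled for. A quick sketch (assuming the JSON layout above) for checking whether the local GPU is covered by a given build:

```python
# Sketch: check the local GPU's compute capability against a build's arch list.
# Assumes the metadata.json layout shown above and an available CUDA device.
import json
import torch

with open("build/torch210-cxx11-cu126-x86_64-linux/metadata.json") as f:
    meta = json.load(f)

archs = {arch.removesuffix("+PTX") for arch in meta["backend"]["archs"]}
major, minor = torch.cuda.get_device_capability()
print(f"{major}.{minor}" in archs)  # True if the binary targets this GPU directly
```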
build/torch210-cxx11-cu128-x86_64-linux/{_activation_63b875f.abi3.so → _activation_cuda_5e1630d.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0e4d411f1093b8e4ec62529e9eb94550fc4ff8efc073e20eaedd7ea587885390
-size 4406608
+oid sha256:f5d89134ddac2eee668ec060a853c99a6a3099a05b01e6a372cfa89b25c9a4d5
+size 4406632
build/torch210-cxx11-cu128-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
+from . import _activation_cuda_5e1630d
+ops = torch.ops._activation_cuda_5e1630d
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_63b875f::{op_name}"
+    return f"_activation_cuda_5e1630d::{op_name}"
build/torch210-cxx11-cu128-x86_64-linux/activation/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
build/torch210-cxx11-cu128-x86_64-linux/metadata.json CHANGED
@@ -1,4 +1,21 @@
 {
   "version": 1,
-  "python-depends": []
-}
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch210-cxx11-cu130-x86_64-linux/{_activation_63b875f.abi3.so → _activation_cuda_5e1630d.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f843d365b4eadc5e5b587becaa8ba581a0e0007adf7f1fba59442dd8acf4cd42
-size 4190152
+oid sha256:4b7f7097fa67bb40a27a26e2a6bdeec262eb878307336e9fb350388899e09a89
+size 4190176
build/torch210-cxx11-cu130-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
+from . import _activation_cuda_5e1630d
+ops = torch.ops._activation_cuda_5e1630d
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_63b875f::{op_name}"
+    return f"_activation_cuda_5e1630d::{op_name}"
build/torch210-cxx11-cu130-x86_64-linux/activation/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
build/torch210-cxx11-cu130-x86_64-linux/metadata.json CHANGED
@@ -1,4 +1,19 @@
 {
   "version": 1,
-  "python-depends": []
-}
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "11.0",
+      "12.0+PTX",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch211-cxx11-cu126-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,75 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
+    return out
+
+
+def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.mul_and_silu(out, x)
+    return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
+    return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
+    return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
+    return out
+
+
+def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu(out, x)
+    return out
+
+def silu(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu(out, x)
+    return out
+
+
+def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh(out, x)
+    return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
+    return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
+    return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
+    return out
+
+
+__all__ = [
+    "silu_and_mul",
+    "mul_and_silu",
+    "gelu_and_mul",
+    "gelu_tanh_and_mul",
+    "fatrelu_and_mul",
+    "gelu_fast",
+    "gelu_new",
+    "gelu_quick",
+    "gelu_tanh",
+    "silu",
+    "gelu",
+    "layers",
+]
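The wrappers above are thin: each writes into a caller-provided `out` tensor and returns it. To clarify the shape contract of the fused ops, here is a plain-PyTorch reference for `silu_and_mul` (assumed equivalent, for illustration only; the CUDA op is what the package actually calls):

```python
# Plain-PyTorch reference for silu_and_mul (assumed equivalent; illustration only).
import torch
import torch.nn.functional as F

def silu_and_mul_reference(x: torch.Tensor) -> torch.Tensor:
    gate, up = x.chunk(2, dim=-1)   # split the last dimension in half
    return F.silu(gate) * up        # silu(x[..., :d]) * x[..., d:]

x = torch.randn(4, 16)
print(silu_and_mul_reference(x).shape)  # torch.Size([4, 8])
```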
build/torch211-cxx11-cu126-x86_64-linux/_activation_cuda_5e1630d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c56d80bd2bbfb93ad648f3ef81e414d62a41c7b28d5221f51c5659ba1dd316b0
+size 3119768
build/torch211-cxx11-cu126-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_cuda_5e1630d
+ops = torch.ops._activation_cuda_5e1630d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_activation_cuda_5e1630d::{op_name}"
build/torch211-cxx11-cu126-x86_64-linux/activation/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
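This `activation/__init__.py` shim loads the package's top-level `__init__.py` under a path-derived module name and re-exports its symbols, so `import activation` keeps working inside the flat build directory. A self-contained sketch of the same `importlib.util` technique (the file path in the commented call is hypothetical):

```python
# Standalone sketch: load a module from an explicit file path under a chosen name.
import importlib.util
import sys
from pathlib import Path

def load_module_from_path(name: str, path: Path):
    spec = importlib.util.spec_from_file_location(name, path)
    if spec is None or spec.loader is None:
        raise ImportError(f"Cannot load spec for {name} from {path}")
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module      # register before executing, as the shim does
    spec.loader.exec_module(module)
    return module

# mod = load_module_from_path("my_unique_name", Path("pkg/__init__.py"))  # hypothetical path
```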
build/torch211-cxx11-cu126-x86_64-linux/layers.py ADDED
@@ -0,0 +1,201 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+    """An activation function for SwiGLU.
+
+    The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+    Shapes:
+        x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+        return: (num_tokens, d) or (batch_size, seq_len, d)
+    """
+
+    can_torch_compile: bool = True
+
+    def forward(self, x: torch.Tensor):
+        if not x.is_contiguous():
+            x = x.contiguous()
+        d = x.shape[-1] // 2
+        output_shape = x.shape[:-1] + (d,)
+        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+        ops.silu_and_mul(out, x)
+        return out
+
+class Silu(nn.Module):
+    """An activation function for SiLU.
+
+    The function computes x -> silu(x).
+
+    Shapes:
+        x: (num_tokens, d) or (batch_size, seq_len, d)
+        return: (num_tokens, d) or (batch_size, seq_len, d)
+    """
+
+    can_torch_compile: bool = True
+
+    def forward(self, x: torch.Tensor):
+        if not x.is_contiguous():
+            x = x.contiguous()
+        out = torch.empty_like(x)
+        ops.silu(out, x)
+        return out
+
+class Gelu(nn.Module):
+    """An activation function for GELU.
+
+    The function computes x -> gelu(x).
+
+    Shapes:
+        x: (num_tokens, d) or (batch_size, seq_len, d)
+        return: (num_tokens, d) or (batch_size, seq_len, d)
+    """
+
+    can_torch_compile: bool = True
+
+    def forward(self, x: torch.Tensor):
+        if not x.is_contiguous():
+            x = x.contiguous()
+        out = torch.empty_like(x)
+        ops.gelu(out, x)
+        return out
+
+class GeluTanh(nn.Module):
+    """An activation function for GELU with `tanh` approximation.
+
+    The function computes x -> gelu_tanh(x).
+
+    Shapes:
+        x: (num_tokens, d) or (batch_size, seq_len, d)
+        return: (num_tokens, d) or (batch_size, seq_len, d)
+    """
+
+    can_torch_compile: bool = True
+
+    def forward(self, x: torch.Tensor):
+        if not x.is_contiguous():
+            x = x.contiguous()
+        out = torch.empty_like(x)
+        ops.gelu_tanh(out, x)
+        return out
+
+
+class MulAndSilu(nn.Module):
+    """An activation function for SwiGLU.
+
+    The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
+
+    Shapes:
+        x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+        return: (num_tokens, d) or (batch_size, seq_len, d)
+    """
+
+    can_torch_compile: bool = True
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        if not x.is_contiguous():
+            x = x.contiguous()
+        d = x.shape[-1] // 2
+        output_shape = x.shape[:-1] + (d,)
+        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+        ops.mul_and_silu(out, x)
+        return out
+
+
+class GeluAndMul(nn.Module):
+    """An activation function for GeGLU.
+
+    The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+    Shapes:
+        x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
+        return: (batch_size, seq_len, d) or (num_tokens, d)
+    """
+
+    can_torch_compile: bool = True
+
+    def forward(self, x: torch.Tensor):
+        if not x.is_contiguous():
+            x = x.contiguous()
+        d = x.shape[-1] // 2
+        output_shape = x.shape[:-1] + (d,)
+        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+        ops.gelu_and_mul(out, x)
+        return out
+
+
+class GeluTanhAndMul(nn.Module):
+    can_torch_compile: bool = True
+
+    def forward(self, x: torch.Tensor):
+        if not x.is_contiguous():
+            x = x.contiguous()
+        d = x.shape[-1] // 2
+        output_shape = x.shape[:-1] + (d,)
+        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+        ops.gelu_tanh_and_mul(out, x)
+        return out
+
+
+class FatreluAndMul(nn.Module):
+    """An activation function for FATReLU.
+
+    The function computes x -> FATReLU(x[:d]) * x[d:] where
+    d = x.shape[-1] // 2.
+    This is used in openbmb/MiniCPM-S-1B-sft.
+
+    Shapes:
+        x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+        return: (num_tokens, d) or (batch_size, seq_len, d)
+    """
+
+    can_torch_compile: bool = True
+
+    def __init__(self, threshold: float = 0.0):
+        super().__init__()
+        self.threshold = threshold
+
+    def forward(self, x: torch.Tensor):
+        if not x.is_contiguous():
+            x = x.contiguous()
+        d = x.shape[-1] // 2
+        output_shape = x.shape[:-1] + (d,)
+        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+        ops.fatrelu_and_mul(out, x, self.threshold)
+        return out
+
+
+class FastGELU(nn.Module):
+    can_torch_compile: bool = True
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        if not x.is_contiguous():
+            x = x.contiguous()
+        out = torch.empty_like(x)
+        ops.gelu_fast(out, x)
+        return out
+
+
+class NewGELU(nn.Module):
+    can_torch_compile: bool = True
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        if not x.is_contiguous():
+            x = x.contiguous()
+        out = torch.empty_like(x)
+        ops.gelu_new(out, x)
+        return out
+
+
+class QuickGELU(nn.Module):
+    can_torch_compile: bool = True
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        if not x.is_contiguous():
+            x = x.contiguous()
+        out = torch.empty_like(x)
+        ops.gelu_quick(out, x)
+        return out
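The `*AndMul` layers all follow the same pattern: the input's last dimension is `2 * d` and the output's is `d`. A usage sketch of how such a layer is typically wired into a SwiGLU MLP; `SiluAndMulRef` below is a plain-PyTorch stand-in for `layers.SiluAndMul` so the sketch runs without the CUDA extension:

```python
# Sketch: SwiGLU MLP built around a silu_and_mul-style activation.
# SiluAndMulRef is a CPU-friendly stand-in for layers.SiluAndMul.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SiluAndMulRef(nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate, up = x.chunk(2, dim=-1)
        return F.silu(gate) * up

class SwiGLUMLP(nn.Module):
    def __init__(self, hidden_size: int, intermediate_size: int):
        super().__init__()
        # Project to 2 * intermediate_size; the activation halves it again.
        self.gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.act = SiluAndMulRef()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.down_proj(self.act(self.gate_up_proj(x)))

mlp = SwiGLUMLP(hidden_size=64, intermediate_size=128)
print(mlp(torch.randn(2, 10, 64)).shape)  # torch.Size([2, 10, 64])
```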
build/torch211-cxx11-cu126-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,18 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
build/torch211-cxx11-cu128-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,75 @@ (75 new lines — identical to build/torch211-cxx11-cu126-x86_64-linux/__init__.py shown above.)
build/torch211-cxx11-cu128-x86_64-linux/_activation_cuda_5e1630d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf04bec1ab934e3f8f0f60e94968067c85c4539daa4d2ffb345446debddf437a
+size 4395464
build/torch211-cxx11-cu128-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_cuda_5e1630d
+ops = torch.ops._activation_cuda_5e1630d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_activation_cuda_5e1630d::{op_name}"
build/torch211-cxx11-cu128-x86_64-linux/activation/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu128-x86_64-linux/layers.py ADDED
@@ -0,0 +1,201 @@ (201 new lines — identical to build/torch211-cxx11-cu126-x86_64-linux/layers.py shown above.)
build/torch211-cxx11-cu128-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,21 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch211-cxx11-cu130-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,75 @@ (75 new lines — identical to build/torch211-cxx11-cu126-x86_64-linux/__init__.py shown above.)
build/torch211-cxx11-cu130-x86_64-linux/_activation_cuda_5e1630d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fd7d9b1a41ac1bb3a64c392f13251390570a94f2021c0fcf8168ebd32e64099
+size 4183096
build/torch211-cxx11-cu130-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_cuda_5e1630d
+ops = torch.ops._activation_cuda_5e1630d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_activation_cuda_5e1630d::{op_name}"
build/torch211-cxx11-cu130-x86_64-linux/activation/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu130-x86_64-linux/layers.py ADDED
@@ -0,0 +1,201 @@ (201 new lines — identical to build/torch211-cxx11-cu126-x86_64-linux/layers.py shown above.)
build/torch211-cxx11-cu130-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,19 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "11.0",
+      "12.0+PTX",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch29-cxx11-cu129-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,75 @@ (75 new lines — identical to build/torch211-cxx11-cu126-x86_64-linux/__init__.py shown above.)
build/torch29-cxx11-cu129-x86_64-linux/_activation_cuda_5e1630d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99e5f0df9d07f3bc16feeaffb9863d669f677695c856045954c266c45246dc43
+size 4438768
build/torch29-cxx11-cu129-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_cuda_5e1630d
+ops = torch.ops._activation_cuda_5e1630d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_activation_cuda_5e1630d::{op_name}"
build/torch29-cxx11-cu129-x86_64-linux/activation/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cu129-x86_64-linux/layers.py ADDED
@@ -0,0 +1,201 @@ (201 new lines — identical to build/torch211-cxx11-cu126-x86_64-linux/layers.py shown above.)
build/torch29-cxx11-cu129-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,21 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}