Kernels
danieldk (HF Staff) committed · verified
Commit 0c1f775 · 1 Parent(s): 01c7f3b

Build uploaded using `kernels`.

Files changed (33)
  1. .gitattributes +7 -0
  2. build/torch210-cxx11-cu126-x86_64-linux/_ops.py +3 -3
  3. build/torch210-cxx11-cu126-x86_64-linux/{_rwkv_44f2fa4.abi3.so → _rwkv_cuda_5849bdb.abi3.so} +2 -2
  4. build/torch210-cxx11-cu126-x86_64-linux/metadata.json +11 -2
  5. build/torch210-cxx11-cu126-x86_64-linux/rwkv/__init__.py +2 -2
  6. build/torch210-cxx11-cu128-x86_64-linux/_ops.py +3 -3
  7. build/torch210-cxx11-cu128-x86_64-linux/{_rwkv_44f2fa4.abi3.so → _rwkv_cuda_5849bdb.abi3.so} +2 -2
  8. build/torch210-cxx11-cu128-x86_64-linux/metadata.json +13 -2
  9. build/torch210-cxx11-cu128-x86_64-linux/rwkv/__init__.py +2 -2
  10. build/torch210-cxx11-cu130-x86_64-linux/_ops.py +3 -3
  11. build/torch210-cxx11-cu130-x86_64-linux/{_rwkv_44f2fa4.abi3.so → _rwkv_cuda_5849bdb.abi3.so} +2 -2
  12. build/torch210-cxx11-cu130-x86_64-linux/metadata.json +13 -2
  13. build/torch210-cxx11-cu130-x86_64-linux/rwkv/__init__.py +2 -2
  14. build/torch211-cxx11-cu126-x86_64-linux/__init__.py +170 -0
  15. build/torch211-cxx11-cu126-x86_64-linux/_ops.py +9 -0
  16. build/torch211-cxx11-cu126-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so +3 -0
  17. build/torch211-cxx11-cu126-x86_64-linux/metadata.json +13 -0
  18. build/torch211-cxx11-cu126-x86_64-linux/rwkv/__init__.py +26 -0
  19. build/torch211-cxx11-cu128-x86_64-linux/__init__.py +170 -0
  20. build/torch211-cxx11-cu128-x86_64-linux/_ops.py +9 -0
  21. build/torch211-cxx11-cu128-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so +3 -0
  22. build/torch211-cxx11-cu128-x86_64-linux/metadata.json +15 -0
  23. build/torch211-cxx11-cu128-x86_64-linux/rwkv/__init__.py +26 -0
  24. build/torch211-cxx11-cu130-x86_64-linux/__init__.py +170 -0
  25. build/torch211-cxx11-cu130-x86_64-linux/_ops.py +9 -0
  26. build/torch211-cxx11-cu130-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so +3 -0
  27. build/torch211-cxx11-cu130-x86_64-linux/metadata.json +15 -0
  28. build/torch211-cxx11-cu130-x86_64-linux/rwkv/__init__.py +26 -0
  29. build/torch29-cxx11-cu129-x86_64-linux/__init__.py +170 -0
  30. build/torch29-cxx11-cu129-x86_64-linux/_ops.py +9 -0
  31. build/torch29-cxx11-cu129-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so +3 -0
  32. build/torch29-cxx11-cu129-x86_64-linux/metadata.json +15 -0
  33. build/torch29-cxx11-cu129-x86_64-linux/rwkv/__init__.py +26 -0
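
The per-variant `build/torch*-cxx11-cu*-x86_64-linux` directories follow the layout the Hugging Face `kernels` loader expects: it picks the directory matching the local torch version, CUDA version and platform, and imports it as a regular Python module. A minimal loading sketch; the repository id below is a placeholder (it is not stated in this commit), and `get_kernel` is the public helper of the `kernels` package:

```python
# Sketch only: replace the placeholder repo id with the actual Hub repository.
from kernels import get_kernel  # pip install kernels

rwkv = get_kernel("<namespace>/rwkv")  # resolves the matching build/torch*-cu*-x86_64-linux variant
print(rwkv.__all__)  # forward, forward_bf16, forward_with_state, ...
```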
.gitattributes CHANGED
@@ -101,3 +101,10 @@ build/torch211-cxx11-cu126-aarch64-linux/_rwkv_cuda_5849bdb.abi3.so filter=lfs diff=lfs merge=lfs -text
 build/torch211-cxx11-cu128-aarch64-linux/_rwkv_cuda_5849bdb.abi3.so filter=lfs diff=lfs merge=lfs -text
 build/torch211-cxx11-cu130-aarch64-linux/_rwkv_cuda_5849bdb.abi3.so filter=lfs diff=lfs merge=lfs -text
 build/torch29-cxx11-cu129-aarch64-linux/_rwkv_cuda_5849bdb.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu126-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu128-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu130-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-cu126-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-cu128-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-cu130-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cu129-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rwkv_44f2fa4
-ops = torch.ops._rwkv_44f2fa4
+from . import _rwkv_cuda_5849bdb
+ops = torch.ops._rwkv_cuda_5849bdb
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rwkv_44f2fa4::{op_name}"
+    return f"_rwkv_cuda_5849bdb::{op_name}"
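
The only substantive change in `_ops.py` is the extension module name: loading `_rwkv_cuda_5849bdb.abi3.so` registers the custom ops under a namespace of the same name, so the Python-side binding moves off the old `_rwkv_44f2fa4` namespace. A small, hedged sketch of what the qualified name from `add_op_namespace_prefix` looks like; the `torch.library` use mentioned in the comment is illustrative, not taken from this repo:

```python
# Illustrative sketch: the namespace string mirrors the shim above.
def add_op_namespace_prefix(op_name: str) -> str:
    return f"_rwkv_cuda_5849bdb::{op_name}"

qualified = add_op_namespace_prefix("forward")
print(qualified)  # "_rwkv_cuda_5849bdb::forward"
# A name in this "namespace::op" form is what APIs such as
# torch.library.register_fake(qualified, ...) expect when referring to a
# registered custom operator.
```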
build/torch210-cxx11-cu126-x86_64-linux/{_rwkv_44f2fa4.abi3.so → _rwkv_cuda_5849bdb.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a59b0f8e80bcaa0db9428cff37c26b069587772259057d9d6c51dcd37ee604dd
-size 2116384
+oid sha256:ef57a3b7b3028cf74874ad5f99fd237a942d20322b6c47e824cdcde75a612ac7
+size 2116408
build/torch210-cxx11-cu126-x86_64-linux/metadata.json CHANGED
@@ -1,4 +1,13 @@
 {
   "version": 1,
-  "python-depends": []
-}
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "8.0",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
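
The new `backend.archs` list records the CUDA compute capabilities this binary was compiled for (8.0, 8.9 and 9.0 correspond roughly to A100-, Ada- and Hopper-class GPUs; the cu128/cu130 builds below additionally list 10.0 and 12.0). A hedged sketch for checking a local GPU against this cu126 list:

```python
# Sketch: compare the local device's compute capability with the archs
# declared in this build's metadata.json.
import torch

compiled_archs = {"8.0", "8.9", "9.0"}
major, minor = torch.cuda.get_device_capability()
cc = f"{major}.{minor}"
if cc not in compiled_archs:
    print(f"Warning: compute capability {cc} is not among the compiled archs {sorted(compiled_archs)}")
```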
build/torch210-cxx11-cu126-x86_64-linux/rwkv/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
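
This `rwkv/` subpackage exists so the kernel is reachable under a stable `rwkv` name: `_import_from_path` loads the build directory's top-level `__init__.py` and re-exports it, keying `sys.modules` on a hash of the file path rather than on the package name so that several build variants can coexist in one process. A small sketch of that naming trick, with illustrative paths:

```python
# Sketch of the path-hash naming used above: distinct paths get distinct
# sys.modules keys, so loading the "same" __init__.py from two build
# directories does not clobber a previously imported variant.
import ctypes
from pathlib import Path

def module_name_for(path: Path) -> str:
    return "{:x}".format(ctypes.c_size_t(hash(path.absolute())).value)

a = module_name_for(Path("build/torch210-cxx11-cu126-x86_64-linux/__init__.py"))
b = module_name_for(Path("build/torch210-cxx11-cu128-x86_64-linux/__init__.py"))
assert a != b  # different paths -> different module names (hash collisions aside)
```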
build/torch210-cxx11-cu128-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rwkv_44f2fa4
-ops = torch.ops._rwkv_44f2fa4
+from . import _rwkv_cuda_5849bdb
+ops = torch.ops._rwkv_cuda_5849bdb
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rwkv_44f2fa4::{op_name}"
+    return f"_rwkv_cuda_5849bdb::{op_name}"
build/torch210-cxx11-cu128-x86_64-linux/{_rwkv_44f2fa4.abi3.so → _rwkv_cuda_5849bdb.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:96030c590c12308beae2840cc41e334e5d92a4fb5b6e66c702ded87b5672a7a0
-size 2318744
+oid sha256:bb33f213f9412b8120ed540d92ef1e9c70ed35590feff5d53113140ce5298ee0
+size 2318768
build/torch210-cxx11-cu128-x86_64-linux/metadata.json CHANGED
@@ -1,4 +1,15 @@
 {
   "version": 1,
-  "python-depends": []
-}
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "12.0",
+      "8.0",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch210-cxx11-cu128-x86_64-linux/rwkv/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
build/torch210-cxx11-cu130-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rwkv_44f2fa4
-ops = torch.ops._rwkv_44f2fa4
+from . import _rwkv_cuda_5849bdb
+ops = torch.ops._rwkv_cuda_5849bdb
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rwkv_44f2fa4::{op_name}"
+    return f"_rwkv_cuda_5849bdb::{op_name}"
build/torch210-cxx11-cu130-x86_64-linux/{_rwkv_44f2fa4.abi3.so → _rwkv_cuda_5849bdb.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:706b14fb9bb07c11085e3a7a0ac29e2c8b385c95ed6963dc607f99a9fc7abb6f
-size 2348224
+oid sha256:024c9733ab553416a3165fabcfd1f8957e99b2c256c3fc5f51d1e3032200ff96
+size 2348248
build/torch210-cxx11-cu130-x86_64-linux/metadata.json CHANGED
@@ -1,4 +1,15 @@
 {
   "version": 1,
-  "python-depends": []
-}
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "12.0",
+      "8.0",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch210-cxx11-cu130-x86_64-linux/rwkv/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
build/torch211-cxx11-cu126-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,170 @@
+from ._ops import ops
+from typing import Tuple, Any
+
+# Use a broad Tensor alias to avoid importing torch at import time.
+from torch import Tensor
+
+def forward(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor) -> None:
+    """RWKV WKV forward pass (float32).
+
+    Runs the CUDA kernel and writes the result into ``y`` in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y))
+    ops.forward(w, u, k, v, y)
+
+
+def forward_bf16(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor) -> None:
+    """RWKV WKV forward pass (bfloat16 inputs/outputs, float32 ``w``).
+
+    Runs the CUDA kernel and writes the result into ``y`` in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y))
+    ops.forward_bf16(w, u, k, v, y)
+
+
+def forward_with_state(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor, s: Tensor) -> None:
+    """RWKV WKV forward pass with persistent state (float32).
+
+    Runs the CUDA kernel using and updating state ``s`` and writes the result into ``y``.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+        s: Stateful tensor, shape ``[B, C]``, dtype ``torch.float32`` (updated in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B`` and ``C``; ``y`` shares ``[B, T, C]`` with inputs.
+    """
+    _validate_device_match((w, u, k, v, y, s))
+    ops.forward_with_state(w, u, k, v, y, s)
+
+
+def forward_with_state_bf16(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor, s: Tensor) -> None:
+    """RWKV WKV forward pass with persistent state (bfloat16 inputs/outputs, float32 ``w`` and ``s``).
+
+    Runs the CUDA kernel using and updating state ``s`` and writes the result into ``y``.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+        s: Stateful tensor, shape ``[B, C]``, dtype ``torch.float32`` (updated in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B`` and ``C``; ``y`` shares ``[B, T, C]`` with inputs.
+    """
+    _validate_device_match((w, u, k, v, y, s))
+    ops.forward_with_state_bf16(w, u, k, v, y, s)
+
+
+def backward(
+    w: Tensor,
+    u: Tensor,
+    k: Tensor,
+    v: Tensor,
+    y: Tensor,
+    gy: Tensor,
+    gw: Tensor,
+    gu: Tensor,
+    gk: Tensor,
+    gv: Tensor,
+) -> None:
+    """RWKV WKV backward pass (float32).
+
+    Writes gradients into the provided tensors in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u, k, v, y: Forward-pass tensors, shape ``[B, T, C]``, dtype ``torch.float32``.
+        gy: Gradient of ``y``, shape ``[B, T, C]``, dtype ``torch.float32``.
+        gw: Gradient for ``w``, shape ``[C]``, dtype ``torch.float32`` (written in-place).
+        gu, gk, gv: Gradients for ``u``, ``k``, ``v`` respectively, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y, gy, gw, gu, gk, gv))
+    ops.backward(w, u, k, v, y, gy, gw, gu, gk, gv)
+
+
+def backward_bf16(
+    w: Tensor,
+    u: Tensor,
+    k: Tensor,
+    v: Tensor,
+    y: Tensor,
+    gy: Tensor,
+    gw: Tensor,
+    gu: Tensor,
+    gk: Tensor,
+    gv: Tensor,
+) -> None:
+    """RWKV WKV backward pass (bfloat16 inputs/outputs/gradients, float32 ``w``).
+
+    Writes gradients into the provided tensors in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u, k, v, y: Forward-pass tensors, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        gy: Gradient of ``y``, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        gw: Gradient for ``w``, shape ``[C]``, dtype ``torch.bfloat16`` (written in-place).
+        gu, gk, gv: Gradients for ``u``, ``k``, ``v`` respectively, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y, gy, gw, gu, gk, gv))
+    ops.backward_bf16(w, u, k, v, y, gy, gw, gu, gk, gv)
+
+
+def _validate_device_match(tensors: Tuple[Tensor, ...]) -> None:
+    """Minimal runtime validation that all tensors live on the same CUDA device."""
+    if not tensors:
+        return
+    device = tensors[0].device
+    if not device.type == "cuda":
+        raise RuntimeError("RWKV CUDA ops require CUDA tensors")
+    for t in tensors[1:]:
+        if t.device != device:
+            raise RuntimeError("All tensors must be on the same CUDA device")
+
+
+__all__ = [
+    "forward",
+    "forward_bf16",
+    "forward_with_state",
+    "forward_with_state_bf16",
+    "backward",
+    "backward_bf16",
+]
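
The docstrings above describe a fully in-place API: callers allocate the output ``y`` (and the state ``s`` for the stateful variants) and the kernel fills them. A hedged usage sketch follows; the placeholder repository id is not part of this commit, and the zero-initialized state is an assumption rather than something this file specifies:

```python
# Sketch only: the repo id is a placeholder; shapes follow the docstrings above.
import torch
from kernels import get_kernel

rwkv = get_kernel("<namespace>/rwkv")

B, T, C = 2, 16, 128
dev = "cuda"

w = torch.rand(C, dtype=torch.float32, device=dev)         # decay weights [C]
u = torch.rand(B, T, C, dtype=torch.float32, device=dev)   # inputs [B, T, C]
k = torch.rand(B, T, C, dtype=torch.float32, device=dev)
v = torch.rand(B, T, C, dtype=torch.float32, device=dev)
y = torch.empty(B, T, C, dtype=torch.float32, device=dev)  # output, written in-place

rwkv.forward(w, u, k, v, y)

# Stateful variant: `s` ([B, C], float32) carries the recurrence across chunks.
s = torch.zeros(B, C, dtype=torch.float32, device=dev)     # assumed initialization
for u_c, k_c, v_c in zip(u.chunk(2, dim=1), k.chunk(2, dim=1), v.chunk(2, dim=1)):
    y_c = torch.empty_like(u_c)
    rwkv.forward_with_state(w, u_c, k_c, v_c, y_c, s)       # updates s in-place
```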
build/torch211-cxx11-cu126-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rwkv_cuda_5849bdb
+ops = torch.ops._rwkv_cuda_5849bdb
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rwkv_cuda_5849bdb::{op_name}"
build/torch211-cxx11-cu126-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a55661a0185ca9375700bbd80fe85272180c0f081e115d45ef53b471f55fc1e9
+size 2116408
build/torch211-cxx11-cu126-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,13 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "8.0",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch211-cxx11-cu126-x86_64-linux/rwkv/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu128-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,170 @@
+from ._ops import ops
+from typing import Tuple, Any
+
+# Use a broad Tensor alias to avoid importing torch at import time.
+from torch import Tensor
+
+def forward(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor) -> None:
+    """RWKV WKV forward pass (float32).
+
+    Runs the CUDA kernel and writes the result into ``y`` in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y))
+    ops.forward(w, u, k, v, y)
+
+
+def forward_bf16(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor) -> None:
+    """RWKV WKV forward pass (bfloat16 inputs/outputs, float32 ``w``).
+
+    Runs the CUDA kernel and writes the result into ``y`` in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y))
+    ops.forward_bf16(w, u, k, v, y)
+
+
+def forward_with_state(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor, s: Tensor) -> None:
+    """RWKV WKV forward pass with persistent state (float32).
+
+    Runs the CUDA kernel using and updating state ``s`` and writes the result into ``y``.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+        s: Stateful tensor, shape ``[B, C]``, dtype ``torch.float32`` (updated in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B`` and ``C``; ``y`` shares ``[B, T, C]`` with inputs.
+    """
+    _validate_device_match((w, u, k, v, y, s))
+    ops.forward_with_state(w, u, k, v, y, s)
+
+
+def forward_with_state_bf16(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor, s: Tensor) -> None:
+    """RWKV WKV forward pass with persistent state (bfloat16 inputs/outputs, float32 ``w`` and ``s``).
+
+    Runs the CUDA kernel using and updating state ``s`` and writes the result into ``y``.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+        s: Stateful tensor, shape ``[B, C]``, dtype ``torch.float32`` (updated in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B`` and ``C``; ``y`` shares ``[B, T, C]`` with inputs.
+    """
+    _validate_device_match((w, u, k, v, y, s))
+    ops.forward_with_state_bf16(w, u, k, v, y, s)
+
+
+def backward(
+    w: Tensor,
+    u: Tensor,
+    k: Tensor,
+    v: Tensor,
+    y: Tensor,
+    gy: Tensor,
+    gw: Tensor,
+    gu: Tensor,
+    gk: Tensor,
+    gv: Tensor,
+) -> None:
+    """RWKV WKV backward pass (float32).
+
+    Writes gradients into the provided tensors in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u, k, v, y: Forward-pass tensors, shape ``[B, T, C]``, dtype ``torch.float32``.
+        gy: Gradient of ``y``, shape ``[B, T, C]``, dtype ``torch.float32``.
+        gw: Gradient for ``w``, shape ``[C]``, dtype ``torch.float32`` (written in-place).
+        gu, gk, gv: Gradients for ``u``, ``k``, ``v`` respectively, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y, gy, gw, gu, gk, gv))
+    ops.backward(w, u, k, v, y, gy, gw, gu, gk, gv)
+
+
+def backward_bf16(
+    w: Tensor,
+    u: Tensor,
+    k: Tensor,
+    v: Tensor,
+    y: Tensor,
+    gy: Tensor,
+    gw: Tensor,
+    gu: Tensor,
+    gk: Tensor,
+    gv: Tensor,
+) -> None:
+    """RWKV WKV backward pass (bfloat16 inputs/outputs/gradients, float32 ``w``).
+
+    Writes gradients into the provided tensors in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u, k, v, y: Forward-pass tensors, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        gy: Gradient of ``y``, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        gw: Gradient for ``w``, shape ``[C]``, dtype ``torch.bfloat16`` (written in-place).
+        gu, gk, gv: Gradients for ``u``, ``k``, ``v`` respectively, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y, gy, gw, gu, gk, gv))
+    ops.backward_bf16(w, u, k, v, y, gy, gw, gu, gk, gv)
+
+
+def _validate_device_match(tensors: Tuple[Tensor, ...]) -> None:
+    """Minimal runtime validation that all tensors live on the same CUDA device."""
+    if not tensors:
+        return
+    device = tensors[0].device
+    if not device.type == "cuda":
+        raise RuntimeError("RWKV CUDA ops require CUDA tensors")
+    for t in tensors[1:]:
+        if t.device != device:
+            raise RuntimeError("All tensors must be on the same CUDA device")
+
+
+__all__ = [
+    "forward",
+    "forward_bf16",
+    "forward_with_state",
+    "forward_with_state_bf16",
+    "backward",
+    "backward_bf16",
+]
build/torch211-cxx11-cu128-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rwkv_cuda_5849bdb
+ops = torch.ops._rwkv_cuda_5849bdb
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rwkv_cuda_5849bdb::{op_name}"
build/torch211-cxx11-cu128-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ba2b12cfcc7f264fcb312f71be6f98763a697e1870300120fc4294a7715d5de
+size 2318768
build/torch211-cxx11-cu128-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,15 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "12.0",
+      "8.0",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch211-cxx11-cu128-x86_64-linux/rwkv/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu130-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,170 @@
+from ._ops import ops
+from typing import Tuple, Any
+
+# Use a broad Tensor alias to avoid importing torch at import time.
+from torch import Tensor
+
+def forward(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor) -> None:
+    """RWKV WKV forward pass (float32).
+
+    Runs the CUDA kernel and writes the result into ``y`` in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y))
+    ops.forward(w, u, k, v, y)
+
+
+def forward_bf16(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor) -> None:
+    """RWKV WKV forward pass (bfloat16 inputs/outputs, float32 ``w``).
+
+    Runs the CUDA kernel and writes the result into ``y`` in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y))
+    ops.forward_bf16(w, u, k, v, y)
+
+
+def forward_with_state(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor, s: Tensor) -> None:
+    """RWKV WKV forward pass with persistent state (float32).
+
+    Runs the CUDA kernel using and updating state ``s`` and writes the result into ``y``.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+        s: Stateful tensor, shape ``[B, C]``, dtype ``torch.float32`` (updated in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B`` and ``C``; ``y`` shares ``[B, T, C]`` with inputs.
+    """
+    _validate_device_match((w, u, k, v, y, s))
+    ops.forward_with_state(w, u, k, v, y, s)
+
+
+def forward_with_state_bf16(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor, s: Tensor) -> None:
+    """RWKV WKV forward pass with persistent state (bfloat16 inputs/outputs, float32 ``w`` and ``s``).
+
+    Runs the CUDA kernel using and updating state ``s`` and writes the result into ``y``.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+        s: Stateful tensor, shape ``[B, C]``, dtype ``torch.float32`` (updated in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B`` and ``C``; ``y`` shares ``[B, T, C]`` with inputs.
+    """
+    _validate_device_match((w, u, k, v, y, s))
+    ops.forward_with_state_bf16(w, u, k, v, y, s)
+
+
+def backward(
+    w: Tensor,
+    u: Tensor,
+    k: Tensor,
+    v: Tensor,
+    y: Tensor,
+    gy: Tensor,
+    gw: Tensor,
+    gu: Tensor,
+    gk: Tensor,
+    gv: Tensor,
+) -> None:
+    """RWKV WKV backward pass (float32).
+
+    Writes gradients into the provided tensors in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u, k, v, y: Forward-pass tensors, shape ``[B, T, C]``, dtype ``torch.float32``.
+        gy: Gradient of ``y``, shape ``[B, T, C]``, dtype ``torch.float32``.
+        gw: Gradient for ``w``, shape ``[C]``, dtype ``torch.float32`` (written in-place).
+        gu, gk, gv: Gradients for ``u``, ``k``, ``v`` respectively, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y, gy, gw, gu, gk, gv))
+    ops.backward(w, u, k, v, y, gy, gw, gu, gk, gv)
+
+
+def backward_bf16(
+    w: Tensor,
+    u: Tensor,
+    k: Tensor,
+    v: Tensor,
+    y: Tensor,
+    gy: Tensor,
+    gw: Tensor,
+    gu: Tensor,
+    gk: Tensor,
+    gv: Tensor,
+) -> None:
+    """RWKV WKV backward pass (bfloat16 inputs/outputs/gradients, float32 ``w``).
+
+    Writes gradients into the provided tensors in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u, k, v, y: Forward-pass tensors, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        gy: Gradient of ``y``, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        gw: Gradient for ``w``, shape ``[C]``, dtype ``torch.bfloat16`` (written in-place).
+        gu, gk, gv: Gradients for ``u``, ``k``, ``v`` respectively, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y, gy, gw, gu, gk, gv))
+    ops.backward_bf16(w, u, k, v, y, gy, gw, gu, gk, gv)
+
+
+def _validate_device_match(tensors: Tuple[Tensor, ...]) -> None:
+    """Minimal runtime validation that all tensors live on the same CUDA device."""
+    if not tensors:
+        return
+    device = tensors[0].device
+    if not device.type == "cuda":
+        raise RuntimeError("RWKV CUDA ops require CUDA tensors")
+    for t in tensors[1:]:
+        if t.device != device:
+            raise RuntimeError("All tensors must be on the same CUDA device")
+
+
+__all__ = [
+    "forward",
+    "forward_bf16",
+    "forward_with_state",
+    "forward_with_state_bf16",
+    "backward",
+    "backward_bf16",
+]
build/torch211-cxx11-cu130-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rwkv_cuda_5849bdb
+ops = torch.ops._rwkv_cuda_5849bdb
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rwkv_cuda_5849bdb::{op_name}"
build/torch211-cxx11-cu130-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:582930c14cdc9d0f9a5b83ad198433bbf0d53e2a3b904a63d788f4918487f347
+size 2348248
build/torch211-cxx11-cu130-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,15 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "12.0",
+      "8.0",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch211-cxx11-cu130-x86_64-linux/rwkv/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cu129-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,170 @@
+from ._ops import ops
+from typing import Tuple, Any
+
+# Use a broad Tensor alias to avoid importing torch at import time.
+from torch import Tensor
+
+def forward(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor) -> None:
+    """RWKV WKV forward pass (float32).
+
+    Runs the CUDA kernel and writes the result into ``y`` in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y))
+    ops.forward(w, u, k, v, y)
+
+
+def forward_bf16(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor) -> None:
+    """RWKV WKV forward pass (bfloat16 inputs/outputs, float32 ``w``).
+
+    Runs the CUDA kernel and writes the result into ``y`` in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y))
+    ops.forward_bf16(w, u, k, v, y)
+
+
+def forward_with_state(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor, s: Tensor) -> None:
+    """RWKV WKV forward pass with persistent state (float32).
+
+    Runs the CUDA kernel using and updating state ``s`` and writes the result into ``y``.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.float32``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+        s: Stateful tensor, shape ``[B, C]``, dtype ``torch.float32`` (updated in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B`` and ``C``; ``y`` shares ``[B, T, C]`` with inputs.
+    """
+    _validate_device_match((w, u, k, v, y, s))
+    ops.forward_with_state(w, u, k, v, y, s)
+
+
+def forward_with_state_bf16(w: Tensor, u: Tensor, k: Tensor, v: Tensor, y: Tensor, s: Tensor) -> None:
+    """RWKV WKV forward pass with persistent state (bfloat16 inputs/outputs, float32 ``w`` and ``s``).
+
+    Runs the CUDA kernel using and updating state ``s`` and writes the result into ``y``.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u: Input tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        k: Key tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        v: Value tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        y: Output tensor, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+        s: Stateful tensor, shape ``[B, C]``, dtype ``torch.float32`` (updated in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B`` and ``C``; ``y`` shares ``[B, T, C]`` with inputs.
+    """
+    _validate_device_match((w, u, k, v, y, s))
+    ops.forward_with_state_bf16(w, u, k, v, y, s)
+
+
+def backward(
+    w: Tensor,
+    u: Tensor,
+    k: Tensor,
+    v: Tensor,
+    y: Tensor,
+    gy: Tensor,
+    gw: Tensor,
+    gu: Tensor,
+    gk: Tensor,
+    gv: Tensor,
+) -> None:
+    """RWKV WKV backward pass (float32).
+
+    Writes gradients into the provided tensors in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u, k, v, y: Forward-pass tensors, shape ``[B, T, C]``, dtype ``torch.float32``.
+        gy: Gradient of ``y``, shape ``[B, T, C]``, dtype ``torch.float32``.
+        gw: Gradient for ``w``, shape ``[C]``, dtype ``torch.float32`` (written in-place).
+        gu, gk, gv: Gradients for ``u``, ``k``, ``v`` respectively, shape ``[B, T, C]``, dtype ``torch.float32`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y, gy, gw, gu, gk, gv))
+    ops.backward(w, u, k, v, y, gy, gw, gu, gk, gv)
+
+
+def backward_bf16(
+    w: Tensor,
+    u: Tensor,
+    k: Tensor,
+    v: Tensor,
+    y: Tensor,
+    gy: Tensor,
+    gw: Tensor,
+    gu: Tensor,
+    gk: Tensor,
+    gv: Tensor,
+) -> None:
+    """RWKV WKV backward pass (bfloat16 inputs/outputs/gradients, float32 ``w``).
+
+    Writes gradients into the provided tensors in-place.
+
+    Args:
+        w: Decay weights, shape ``[C]``, dtype ``torch.float32``.
+        u, k, v, y: Forward-pass tensors, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        gy: Gradient of ``y``, shape ``[B, T, C]``, dtype ``torch.bfloat16``.
+        gw: Gradient for ``w``, shape ``[C]``, dtype ``torch.bfloat16`` (written in-place).
+        gu, gk, gv: Gradients for ``u``, ``k``, ``v`` respectively, shape ``[B, T, C]``, dtype ``torch.bfloat16`` (written in-place).
+
+    Notes:
+        - All tensors must be on the same CUDA device.
+        - Shapes must agree on ``B``, ``T`` and ``C``.
+    """
+    _validate_device_match((w, u, k, v, y, gy, gw, gu, gk, gv))
+    ops.backward_bf16(w, u, k, v, y, gy, gw, gu, gk, gv)
+
+
+def _validate_device_match(tensors: Tuple[Tensor, ...]) -> None:
+    """Minimal runtime validation that all tensors live on the same CUDA device."""
+    if not tensors:
+        return
+    device = tensors[0].device
+    if not device.type == "cuda":
+        raise RuntimeError("RWKV CUDA ops require CUDA tensors")
+    for t in tensors[1:]:
+        if t.device != device:
+            raise RuntimeError("All tensors must be on the same CUDA device")
+
+
+__all__ = [
+    "forward",
+    "forward_bf16",
+    "forward_with_state",
+    "forward_with_state_bf16",
+    "backward",
+    "backward_bf16",
+]
build/torch29-cxx11-cu129-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _rwkv_cuda_5849bdb
+ops = torch.ops._rwkv_cuda_5849bdb
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_rwkv_cuda_5849bdb::{op_name}"
build/torch29-cxx11-cu129-x86_64-linux/_rwkv_cuda_5849bdb.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e124cba0e78954c0aa034427c32ff799bd43f5050eeaceca2c8989841b45495
+size 2335432
build/torch29-cxx11-cu129-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,15 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "12.0",
+      "8.0",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch29-cxx11-cu129-x86_64-linux/rwkv/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))