danieldk (HF Staff) committed
Commit 0008885 · verified · 1 Parent(s): ece999d

Build uploaded using `kernels`.
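Builds uploaded this way are normally consumed through the `kernels` library, which selects the build variant matching the running torch/CUDA/platform at load time. A minimal loading sketch; the Hub repository id below is a hypothetical placeholder, since this commit does not name it:

    # Assumes the `kernels` package is installed; the repo id is a guess.
    from kernels import get_kernel

    # Resolves and imports a variant such as
    # build/torch211-cxx11-cu128-x86_64-linux for the local environment.
    mra = get_kernel("kernels-community/mra")  # hypothetical repo id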

Files changed (30)
  1. .gitattributes +7 -0
  2. build/torch210-cxx11-cu126-x86_64-linux/{_mra_cuda_8d73b81.abi3.so → _mra_cuda_c1eaa2d.abi3.so} +1 -1
  3. build/torch210-cxx11-cu126-x86_64-linux/_ops.py +3 -3
  4. build/torch210-cxx11-cu126-x86_64-linux/mra/__init__.py +2 -2
  5. build/torch210-cxx11-cu128-x86_64-linux/{_mra_cuda_8d73b81.abi3.so → _mra_cuda_c1eaa2d.abi3.so} +1 -1
  6. build/torch210-cxx11-cu128-x86_64-linux/_ops.py +3 -3
  7. build/torch210-cxx11-cu128-x86_64-linux/mra/__init__.py +2 -2
  8. build/torch210-cxx11-cu130-x86_64-linux/{_mra_cuda_8d73b81.abi3.so → _mra_cuda_c1eaa2d.abi3.so} +1 -1
  9. build/torch210-cxx11-cu130-x86_64-linux/_ops.py +3 -3
  10. build/torch210-cxx11-cu130-x86_64-linux/mra/__init__.py +2 -2
  11. build/torch211-cxx11-cu126-x86_64-linux/__init__.py +25 -0
  12. build/torch211-cxx11-cu126-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so +3 -0
  13. build/torch211-cxx11-cu126-x86_64-linux/_ops.py +9 -0
  14. build/torch211-cxx11-cu126-x86_64-linux/metadata.json +17 -0
  15. build/torch211-cxx11-cu126-x86_64-linux/mra/__init__.py +26 -0
  16. build/torch211-cxx11-cu128-x86_64-linux/__init__.py +25 -0
  17. build/torch211-cxx11-cu128-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so +3 -0
  18. build/torch211-cxx11-cu128-x86_64-linux/_ops.py +9 -0
  19. build/torch211-cxx11-cu128-x86_64-linux/metadata.json +20 -0
  20. build/torch211-cxx11-cu128-x86_64-linux/mra/__init__.py +26 -0
  21. build/torch211-cxx11-cu130-x86_64-linux/__init__.py +25 -0
  22. build/torch211-cxx11-cu130-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so +3 -0
  23. build/torch211-cxx11-cu130-x86_64-linux/_ops.py +9 -0
  24. build/torch211-cxx11-cu130-x86_64-linux/metadata.json +18 -0
  25. build/torch211-cxx11-cu130-x86_64-linux/mra/__init__.py +26 -0
  26. build/torch29-cxx11-cu129-x86_64-linux/__init__.py +25 -0
  27. build/torch29-cxx11-cu129-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so +3 -0
  28. build/torch29-cxx11-cu129-x86_64-linux/_ops.py +9 -0
  29. build/torch29-cxx11-cu129-x86_64-linux/metadata.json +20 -0
  30. build/torch29-cxx11-cu129-x86_64-linux/mra/__init__.py +26 -0
.gitattributes CHANGED
@@ -107,3 +107,10 @@ build/torch211-cxx11-cu126-aarch64-linux/_mra_cuda_c1eaa2d.abi3.so filter=lfs diff=lfs merge=lfs -text
 build/torch211-cxx11-cu128-aarch64-linux/_mra_cuda_c1eaa2d.abi3.so filter=lfs diff=lfs merge=lfs -text
 build/torch211-cxx11-cu130-aarch64-linux/_mra_cuda_c1eaa2d.abi3.so filter=lfs diff=lfs merge=lfs -text
 build/torch29-cxx11-cu129-aarch64-linux/_mra_cuda_c1eaa2d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu126-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu128-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu130-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-cu126-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-cu128-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch211-cxx11-cu130-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cu129-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-x86_64-linux/{_mra_cuda_8d73b81.abi3.so → _mra_cuda_c1eaa2d.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ba41ec23dc215dd0cabe383feb8e640054f975fceb4350b1421b6d9af9118f0a
+oid sha256:7cc021351bfa4e923b15d186877cddf3d935d6223a369f40ffabb12507536e90
 size 2451480
build/torch210-cxx11-cu126-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _mra_cuda_8d73b81
-ops = torch.ops._mra_cuda_8d73b81
+from . import _mra_cuda_c1eaa2d
+ops = torch.ops._mra_cuda_c1eaa2d
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_mra_cuda_8d73b81::{op_name}"
+    return f"_mra_cuda_c1eaa2d::{op_name}"
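The hashed suffix (`8d73b81` → `c1eaa2d`) gives each build its own `torch.ops` namespace, so differently-built copies of the same kernel cannot clash inside one process. A short sketch of how `ops` and `add_op_namespace_prefix` are meant to be used, mirroring the wrapper functions added later in this commit:

    import torch
    from ._ops import ops, add_op_namespace_prefix  # inside a build variant

    def index_max(index_vals: torch.Tensor, indices: torch.Tensor,
                  A_num_block: int, B_num_block: int) -> torch.Tensor:
        # Dispatches to torch.ops._mra_cuda_c1eaa2d.index_max.
        return ops.index_max(index_vals, indices, A_num_block, B_num_block)

    # Fully qualified op name, e.g. for registering meta/fake implementations:
    assert add_op_namespace_prefix("index_max") == "_mra_cuda_c1eaa2d::index_max"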
build/torch210-cxx11-cu126-x86_64-linux/mra/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
build/torch210-cxx11-cu128-x86_64-linux/{_mra_cuda_8d73b81.abi3.so → _mra_cuda_c1eaa2d.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0dc6e29a3f9d03cfe9644d4fc7cb9a43a5913ca2b30a833ba914e76e583ea6a4
+oid sha256:1b1ce65f7d848240c848986a70ec25bc6bf1bc53c3046df1461649630afb81f8
 size 2719848
build/torch210-cxx11-cu128-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _mra_cuda_8d73b81
-ops = torch.ops._mra_cuda_8d73b81
+from . import _mra_cuda_c1eaa2d
+ops = torch.ops._mra_cuda_c1eaa2d
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_mra_cuda_8d73b81::{op_name}"
+    return f"_mra_cuda_c1eaa2d::{op_name}"
build/torch210-cxx11-cu128-x86_64-linux/mra/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
build/torch210-cxx11-cu130-x86_64-linux/{_mra_cuda_8d73b81.abi3.so → _mra_cuda_c1eaa2d.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a10865ad51b3e8f204813fef88144ff158ae93dd8462027a88f04c849fa9951c
+oid sha256:26e6338feb8e2e4589397574e56ccf8b1e2761714e6ae0b5a474030b9e95f4f5
 size 2641368
build/torch210-cxx11-cu130-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _mra_cuda_8d73b81
-ops = torch.ops._mra_cuda_8d73b81
+from . import _mra_cuda_c1eaa2d
+ops = torch.ops._mra_cuda_c1eaa2d
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_mra_cuda_8d73b81::{op_name}"
+    return f"_mra_cuda_c1eaa2d::{op_name}"
build/torch210-cxx11-cu130-x86_64-linux/mra/__init__.py CHANGED
@@ -1,10 +1,10 @@
 import ctypes
+import importlib.util
 import sys
-
-import importlib
 from pathlib import Path
 from types import ModuleType
 
+
 def _import_from_path(file_path: Path) -> ModuleType:
     # We cannot use the module name as-is, after adding it to `sys.modules`,
     # it would also be used for other imports. So, we make a module name that
build/torch211-cxx11-cu126-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,25 @@
+from ._ops import ops
+import torch
+
+def index_max(index_vals: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
+    return ops.index_max(index_vals, indices, A_num_block, B_num_block)
+
+def mm_to_sparse(dense_A: torch.Tensor, dense_B: torch.Tensor, indices: torch.Tensor):
+    return ops.mm_to_sparse(dense_A, dense_B, indices)
+
+def sparse_dense_mm(sparse_A: torch.Tensor, indices: torch.Tensor, dense_B: torch.Tensor, A_num_block: int):
+    return ops.sparse_dense_mm(sparse_A, indices, dense_B, A_num_block)
+
+def reduce_sum(sparse_A: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
+    return ops.reduce_sum(sparse_A, indices, A_num_block, B_num_block)
+
+def scatter(dense_A: torch.Tensor, indices: torch.Tensor, B_num_block: int):
+    return ops.scatter(dense_A, indices, B_num_block)
+
+__all__ = [
+    "index_max",
+    "mm_to_sparse",
+    "sparse_dense_mm",
+    "reduce_sum",
+    "scatter",
+]
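A hedged sketch of loading this variant directly from a checkout with `importlib`, which is essentially what the `kernels` loader automates; it needs a matching torch/CUDA install, because importing `_ops` loads the bundled `.so`:

    import importlib.util
    import sys
    from pathlib import Path

    variant_dir = Path("build/torch211-cxx11-cu126-x86_64-linux")
    spec = importlib.util.spec_from_file_location(
        "mra_variant",
        variant_dir / "__init__.py",
        submodule_search_locations=[str(variant_dir)],
    )
    module = importlib.util.module_from_spec(spec)
    sys.modules[spec.name] = module  # required for the relative `from ._ops import ops`
    spec.loader.exec_module(module)
    print(module.__all__)  # ['index_max', 'mm_to_sparse', 'sparse_dense_mm', 'reduce_sum', 'scatter']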
build/torch211-cxx11-cu126-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dd2ac9defcbaf5d03db15bc1bd55476e4520c3eb91b157a6f2488d37a16f011
+size 2451480
build/torch211-cxx11-cu126-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _mra_cuda_c1eaa2d
+ops = torch.ops._mra_cuda_c1eaa2d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_mra_cuda_c1eaa2d::{op_name}"
build/torch211-cxx11-cu126-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,17 @@
+{
+  "version": 1,
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
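`metadata.json` records the compute capabilities each variant was compiled for (`+PTX` entries also cover newer GPUs through JIT compilation). A small sketch for comparing them against the local GPU; the schema assumed here is exactly what the file above shows:

    import json
    from pathlib import Path
    import torch

    meta = json.loads(
        Path("build/torch211-cxx11-cu126-x86_64-linux/metadata.json").read_text()
    )
    cap = "{}.{}".format(*torch.cuda.get_device_capability())
    print(f"local compute capability: {cap}")
    print(f"compiled archs: {meta['backend']['archs']}")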
build/torch211-cxx11-cu126-x86_64-linux/mra/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
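The path-hashed module name exists because a fixed key in `sys.modules` would be shared across variants: loading a second build would silently reuse the first. A minimal illustration of the derivation (the output is machine- and process-dependent, since Python's `hash()` is salted):

    import ctypes
    from pathlib import Path

    p = Path("build/torch211-cxx11-cu126-x86_64-linux/__init__.py")
    # Same scheme as _import_from_path: hex of the unsigned view of hash(path).
    module_name = "{:x}".format(ctypes.c_size_t(hash(p.absolute())).value)
    print(module_name)  # unique per absolute path within one process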
build/torch211-cxx11-cu128-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,25 @@
+from ._ops import ops
+import torch
+
+def index_max(index_vals: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
+    return ops.index_max(index_vals, indices, A_num_block, B_num_block)
+
+def mm_to_sparse(dense_A: torch.Tensor, dense_B: torch.Tensor, indices: torch.Tensor):
+    return ops.mm_to_sparse(dense_A, dense_B, indices)
+
+def sparse_dense_mm(sparse_A: torch.Tensor, indices: torch.Tensor, dense_B: torch.Tensor, A_num_block: int):
+    return ops.sparse_dense_mm(sparse_A, indices, dense_B, A_num_block)
+
+def reduce_sum(sparse_A: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
+    return ops.reduce_sum(sparse_A, indices, A_num_block, B_num_block)
+
+def scatter(dense_A: torch.Tensor, indices: torch.Tensor, B_num_block: int):
+    return ops.scatter(dense_A, indices, B_num_block)
+
+__all__ = [
+    "index_max",
+    "mm_to_sparse",
+    "sparse_dense_mm",
+    "reduce_sum",
+    "scatter",
+]
build/torch211-cxx11-cu128-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55c46f680dc0f314e39ae37b138c9a7c74cf12b77fc41d8a71f4de0692803a92
+size 2719848
build/torch211-cxx11-cu128-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _mra_cuda_c1eaa2d
+ops = torch.ops._mra_cuda_c1eaa2d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_mra_cuda_c1eaa2d::{op_name}"
build/torch211-cxx11-cu128-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,20 @@
+{
+  "version": 1,
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch211-cxx11-cu128-x86_64-linux/mra/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu130-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,25 @@
+from ._ops import ops
+import torch
+
+def index_max(index_vals: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
+    return ops.index_max(index_vals, indices, A_num_block, B_num_block)
+
+def mm_to_sparse(dense_A: torch.Tensor, dense_B: torch.Tensor, indices: torch.Tensor):
+    return ops.mm_to_sparse(dense_A, dense_B, indices)
+
+def sparse_dense_mm(sparse_A: torch.Tensor, indices: torch.Tensor, dense_B: torch.Tensor, A_num_block: int):
+    return ops.sparse_dense_mm(sparse_A, indices, dense_B, A_num_block)
+
+def reduce_sum(sparse_A: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
+    return ops.reduce_sum(sparse_A, indices, A_num_block, B_num_block)
+
+def scatter(dense_A: torch.Tensor, indices: torch.Tensor, B_num_block: int):
+    return ops.scatter(dense_A, indices, B_num_block)
+
+__all__ = [
+    "index_max",
+    "mm_to_sparse",
+    "sparse_dense_mm",
+    "reduce_sum",
+    "scatter",
+]
build/torch211-cxx11-cu130-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1100047c3ad05fa54430cad675e934896bdfb8eeae70903e1e452d9a30cc789
+size 2641368
build/torch211-cxx11-cu130-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _mra_cuda_c1eaa2d
+ops = torch.ops._mra_cuda_c1eaa2d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_mra_cuda_c1eaa2d::{op_name}"
build/torch211-cxx11-cu130-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,18 @@
+{
+  "version": 1,
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "11.0",
+      "12.0+PTX",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch211-cxx11-cu130-x86_64-linux/mra/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cu129-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,25 @@
+from ._ops import ops
+import torch
+
+def index_max(index_vals: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
+    return ops.index_max(index_vals, indices, A_num_block, B_num_block)
+
+def mm_to_sparse(dense_A: torch.Tensor, dense_B: torch.Tensor, indices: torch.Tensor):
+    return ops.mm_to_sparse(dense_A, dense_B, indices)
+
+def sparse_dense_mm(sparse_A: torch.Tensor, indices: torch.Tensor, dense_B: torch.Tensor, A_num_block: int):
+    return ops.sparse_dense_mm(sparse_A, indices, dense_B, A_num_block)
+
+def reduce_sum(sparse_A: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
+    return ops.reduce_sum(sparse_A, indices, A_num_block, B_num_block)
+
+def scatter(dense_A: torch.Tensor, indices: torch.Tensor, B_num_block: int):
+    return ops.scatter(dense_A, indices, B_num_block)
+
+__all__ = [
+    "index_max",
+    "mm_to_sparse",
+    "sparse_dense_mm",
+    "reduce_sum",
+    "scatter",
+]
build/torch29-cxx11-cu129-x86_64-linux/_mra_cuda_c1eaa2d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50483a6e187b54c590af77b0ca375bd072b4b8be3c36ed46acba254ce285f73f
+size 2748224
build/torch29-cxx11-cu129-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _mra_cuda_c1eaa2d
+ops = torch.ops._mra_cuda_c1eaa2d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_mra_cuda_c1eaa2d::{op_name}"
build/torch29-cxx11-cu129-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,20 @@
+{
+  "version": 1,
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch29-cxx11-cu129-x86_64-linux/mra/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))