drbh committed
Commit c124d86 · unverified · 0 Parent(s)

Migrated from kernels-community/quantization-gptq

Files changed (25)
  1. .gitattributes +55 -0
  2. build/torch210-cxx11-cpu-x86_64-linux/__init__.py +3 -0
  3. build/torch210-cxx11-cpu-x86_64-linux/_ops.py +9 -0
  4. build/torch210-cxx11-cpu-x86_64-linux/_quantization_gptq_cpu_ba11934.abi3.so +3 -0
  5. build/torch210-cxx11-cpu-x86_64-linux/custom_ops.py +19 -0
  6. build/torch210-cxx11-cpu-x86_64-linux/metadata.json +8 -0
  7. build/torch210-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py +26 -0
  8. build/torch211-cxx11-cpu-x86_64-linux/__init__.py +3 -0
  9. build/torch211-cxx11-cpu-x86_64-linux/_ops.py +9 -0
  10. build/torch211-cxx11-cpu-x86_64-linux/_quantization_gptq_cpu_ba11934.abi3.so +3 -0
  11. build/torch211-cxx11-cpu-x86_64-linux/custom_ops.py +19 -0
  12. build/torch211-cxx11-cpu-x86_64-linux/metadata.json +8 -0
  13. build/torch211-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py +26 -0
  14. build/torch28-cxx11-cpu-x86_64-linux/__init__.py +3 -0
  15. build/torch28-cxx11-cpu-x86_64-linux/_ops.py +9 -0
  16. build/torch28-cxx11-cpu-x86_64-linux/_quantization_gptq_491482a.abi3.so +3 -0
  17. build/torch28-cxx11-cpu-x86_64-linux/custom_ops.py +19 -0
  18. build/torch28-cxx11-cpu-x86_64-linux/metadata.json +4 -0
  19. build/torch28-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py +26 -0
  20. build/torch29-cxx11-cpu-x86_64-linux/__init__.py +3 -0
  21. build/torch29-cxx11-cpu-x86_64-linux/_ops.py +9 -0
  22. build/torch29-cxx11-cpu-x86_64-linux/_quantization_gptq_cpu_833865c.abi3.so +3 -0
  23. build/torch29-cxx11-cpu-x86_64-linux/custom_ops.py +19 -0
  24. build/torch29-cxx11-cpu-x86_64-linux/metadata.json +5 -0
  25. build/torch29-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py +26 -0
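Each build/torchXX-cxx11-cpu-x86_64-linux tree above is one prebuilt variant, keyed by PyTorch version, C++ ABI, accelerator, and platform. This is the layout consumed by the Hugging Face `kernels` package, which selects the variant matching the local environment. A hedged loading sketch, assuming the `kernels` package is installed and using the hub id named in the commit message:

    from kernels import get_kernel

    # Resolves to the build/ subtree matching the local torch version,
    # ABI, and platform, then imports it.
    quantization_gptq = get_kernel("kernels-community/quantization-gptq")
    # Exposes the API defined in the files below, i.e.
    # quantization_gptq.gemm_int4_forward(input, weight, zeros, absmax, blocksize)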
.gitattributes ADDED
@@ -0,0 +1,55 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ build/torch28-cxx11-cpu-x86_64-linux/_quantization_gptq_8c16cd6.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cpu-x86_64-linux/_quantization_gptq_8c16cd6.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch28-cxx11-cpu-x86_64-linux/_quantization_gptq_986b11f.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cpu-x86_64-linux/_quantization_gptq_986b11f.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch210-cxx11-cpu-x86_64-linux/_quantization_gptq_d11f52b.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch28-cxx11-cpu-x86_64-linux/_quantization_gptq_d11f52b.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cpu-x86_64-linux/_quantization_gptq_d11f52b.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch210-cxx11-cpu-x86_64-linux/_quantization_gptq_1f6c2de.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch28-cxx11-cpu-x86_64-linux/_quantization_gptq_1f6c2de.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cpu-x86_64-linux/_quantization_gptq_1f6c2de.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch210-cxx11-cpu-x86_64-linux/_quantization_gptq_b12bb50.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch28-cxx11-cpu-x86_64-linux/_quantization_gptq_b12bb50.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cpu-x86_64-linux/_quantization_gptq_b12bb50.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch210-cxx11-cpu-x86_64-linux/_quantization_gptq_491482a.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch28-cxx11-cpu-x86_64-linux/_quantization_gptq_491482a.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cpu-x86_64-linux/_quantization_gptq_491482a.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch210-cxx11-cpu-x86_64-linux/_quantization_gptq_cpu_833865c.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cpu-x86_64-linux/_quantization_gptq_cpu_833865c.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch210-cxx11-cpu-x86_64-linux/_quantization_gptq_cpu_ba11934.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch211-cxx11-cpu-x86_64-linux/_quantization_gptq_cpu_ba11934.abi3.so filter=lfs diff=lfs merge=lfs -text
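All of the .abi3.so rules above route the prebuilt shared objects through Git LFS, so only pointer files live in the repository itself. As a quick sanity check (a sketch assuming it runs inside a clone of this repo), `git check-attr` reads the resolved attribute back:

    import subprocess

    # Prints the attribute value the path resolves to under .gitattributes;
    # expected output ends with "filter: lfs".
    so = "build/torch210-cxx11-cpu-x86_64-linux/_quantization_gptq_cpu_ba11934.abi3.so"
    result = subprocess.run(["git", "check-attr", "filter", "--", so],
                            capture_output=True, text=True, check=True)
    print(result.stdout)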
build/torch210-cxx11-cpu-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .custom_ops import gemm_int4_forward
+
+ __all__ = ["gemm_int4_forward"]
build/torch210-cxx11-cpu-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _quantization_gptq_cpu_ba11934
+ ops = torch.ops._quantization_gptq_cpu_ba11934
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_quantization_gptq_cpu_ba11934::{op_name}"
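Because the op namespace embeds a per-build hash (ba11934 here), downstream code cannot hard-code the qualified op name; add_op_namespace_prefix builds it at runtime. A hypothetical use, assuming the build's .so has already been loaded so the op is registered: attaching a fake (meta) implementation for shape inference, where the output-shape rule is an assumption for illustration, not something this repo specifies:

    import torch
    from ._ops import add_op_namespace_prefix

    # "_quantization_gptq_cpu_ba11934::gemm_int4_forward"
    qualname = add_op_namespace_prefix("gemm_int4_forward")

    @torch.library.register_fake(qualname)
    def _(input, weight, zeros, absmax, blocksize):
        # Assumed: output keeps the batch dims and takes the weight's
        # output dimension as its last dim.
        return input.new_empty(*input.shape[:-1], weight.shape[0])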
build/torch210-cxx11-cpu-x86_64-linux/_quantization_gptq_cpu_ba11934.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04d00c780c3f921ac73ad86754f809b8c637e5813c26936d3f5a0a0d0447ea22
+ size 1950576
build/torch210-cxx11-cpu-x86_64-linux/custom_ops.py ADDED
@@ -0,0 +1,19 @@
+ import torch
+ from ._ops import ops
+
+ def gemm_int4_forward(
+     input: torch.Tensor,
+     weight: torch.Tensor,
+     zeros: torch.Tensor,
+     absmax: torch.Tensor,
+     blocksize: int,
+ ) -> torch.Tensor:
+     original_dtype = input.dtype
+     if original_dtype != torch.bfloat16:
+         input = input.to(torch.bfloat16)
+
+     output = ops.gemm_int4_forward(input, weight, zeros, absmax, blocksize)
+     if original_dtype != torch.bfloat16:
+         output = output.to(original_dtype)
+
+     return output
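Beyond dispatching to the registered op, the wrapper only performs a dtype round trip: the CPU kernel computes in bfloat16, so other input dtypes are cast in and the result is cast back. A minimal self-contained sketch of that pattern, with a plain matmul standing in for the real int4 kernel:

    import torch

    def bf16_roundtrip(kernel, x: torch.Tensor) -> torch.Tensor:
        # Same pattern as gemm_int4_forward above: run the kernel in
        # bfloat16, return the result in the caller's dtype.
        original_dtype = x.dtype
        if original_dtype != torch.bfloat16:
            x = x.to(torch.bfloat16)
        out = kernel(x)
        if original_dtype != torch.bfloat16:
            out = out.to(original_dtype)
        return out

    w = torch.randn(8, 16, dtype=torch.bfloat16)
    y = bf16_roundtrip(lambda t: t @ w, torch.randn(2, 8))  # fp32 in
    assert y.dtype == torch.float32                         # fp32 out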
build/torch210-cxx11-cpu-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "version": 1,
+   "license": "MIT",
+   "python-depends": [],
+   "backend": {
+     "type": "cpu"
+   }
+ }
build/torch210-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import importlib.util
+ import sys
+ from pathlib import Path
+ from types import ModuleType
+
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is: after adding it to `sys.modules`,
+     # it would also be used for other imports. So we derive the module name
+     # from the hex-encoded hash of the path, making it unique per path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
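_import_from_path is what lets several hash-named builds of the same package coexist: each file is loaded under a path-derived module name, so two builds never collide in sys.modules. A self-contained sketch of the same mechanism against a throwaway file (the file name and VALUE are illustrative):

    import importlib.util
    import sys
    import tempfile
    from pathlib import Path

    # A throwaway module on disk, standing in for the package __init__.py.
    path = Path(tempfile.mkdtemp()) / "example.py"
    path.write_text("VALUE = 42\n")

    # Unique, path-derived module name, as in _import_from_path above.
    name = "mod_{:x}".format(hash(path.absolute()) & 0xFFFFFFFF)
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    spec.loader.exec_module(module)
    print(module.VALUE)  # 42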
build/torch211-cxx11-cpu-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .custom_ops import gemm_int4_forward
+
+ __all__ = ["gemm_int4_forward"]
build/torch211-cxx11-cpu-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _quantization_gptq_cpu_ba11934
+ ops = torch.ops._quantization_gptq_cpu_ba11934
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_quantization_gptq_cpu_ba11934::{op_name}"
build/torch211-cxx11-cpu-x86_64-linux/_quantization_gptq_cpu_ba11934.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8568dbc11feb98d84f90a02f361d84f7a6780e79a53e6f4837345589bae2800f
+ size 1950576
build/torch211-cxx11-cpu-x86_64-linux/custom_ops.py ADDED
@@ -0,0 +1,19 @@
+ import torch
+ from ._ops import ops
+
+ def gemm_int4_forward(
+     input: torch.Tensor,
+     weight: torch.Tensor,
+     zeros: torch.Tensor,
+     absmax: torch.Tensor,
+     blocksize: int,
+ ) -> torch.Tensor:
+     original_dtype = input.dtype
+     if original_dtype != torch.bfloat16:
+         input = input.to(torch.bfloat16)
+
+     output = ops.gemm_int4_forward(input, weight, zeros, absmax, blocksize)
+     if original_dtype != torch.bfloat16:
+         output = output.to(original_dtype)
+
+     return output
build/torch211-cxx11-cpu-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "version": 1,
+   "license": "MIT",
+   "python-depends": [],
+   "backend": {
+     "type": "cpu"
+   }
+ }
build/torch211-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import importlib.util
+ import sys
+ from pathlib import Path
+ from types import ModuleType
+
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is: after adding it to `sys.modules`,
+     # it would also be used for other imports. So we derive the module name
+     # from the hex-encoded hash of the path, making it unique per path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch28-cxx11-cpu-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .custom_ops import gemm_int4_forward
+
+ __all__ = ["gemm_int4_forward"]
build/torch28-cxx11-cpu-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _quantization_gptq_491482a
+ ops = torch.ops._quantization_gptq_491482a
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_quantization_gptq_491482a::{op_name}"
build/torch28-cxx11-cpu-x86_64-linux/_quantization_gptq_491482a.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d6bf1488920a99ecc3ce87dae7be7d5fcbab29867d6b3d380c5bc05a600183e
+ size 101912
build/torch28-cxx11-cpu-x86_64-linux/custom_ops.py ADDED
@@ -0,0 +1,19 @@
+ import torch
+ from ._ops import ops
+
+ def gemm_int4_forward(
+     input: torch.Tensor,
+     weight: torch.Tensor,
+     zeros: torch.Tensor,
+     absmax: torch.Tensor,
+     blocksize: int,
+ ) -> torch.Tensor:
+     original_dtype = input.dtype
+     if original_dtype != torch.bfloat16:
+         input = input.to(torch.bfloat16)
+
+     output = ops.gemm_int4_forward(input, weight, zeros, absmax, blocksize)
+     if original_dtype != torch.bfloat16:
+         output = output.to(original_dtype)
+
+     return output
build/torch28-cxx11-cpu-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "version": 1,
+   "python-depends": []
+ }
build/torch28-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib.util
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is: after adding it to `sys.modules`,
+     # it would also be used for other imports. So we derive the module name
+     # from the hex-encoded hash of the path, making it unique per path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cpu-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .custom_ops import gemm_int4_forward
+
+ __all__ = ["gemm_int4_forward"]
build/torch29-cxx11-cpu-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _quantization_gptq_cpu_833865c
+ ops = torch.ops._quantization_gptq_cpu_833865c
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_quantization_gptq_cpu_833865c::{op_name}"
build/torch29-cxx11-cpu-x86_64-linux/_quantization_gptq_cpu_833865c.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0d96d376c51ea5c07f14fe5400a6198d9cef00123c8563a4ddde351777f595e
+ size 1945192
build/torch29-cxx11-cpu-x86_64-linux/custom_ops.py ADDED
@@ -0,0 +1,19 @@
+ import torch
+ from ._ops import ops
+
+ def gemm_int4_forward(
+     input: torch.Tensor,
+     weight: torch.Tensor,
+     zeros: torch.Tensor,
+     absmax: torch.Tensor,
+     blocksize: int,
+ ) -> torch.Tensor:
+     original_dtype = input.dtype
+     if original_dtype != torch.bfloat16:
+         input = input.to(torch.bfloat16)
+
+     output = ops.gemm_int4_forward(input, weight, zeros, absmax, blocksize)
+     if original_dtype != torch.bfloat16:
+         output = output.to(original_dtype)
+
+     return output
build/torch29-cxx11-cpu-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "version": 1,
+   "license": "MIT",
+   "python-depends": []
+ }
build/torch29-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib.util
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is: after adding it to `sys.modules`,
+     # it would also be used for other imports. So we derive the module name
+     # from the hex-encoded hash of the path, making it unique per path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))