jonghanko committed
Commit c9c5de0 · verified · 1 Parent(s): e815185

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_with_dims_native.h +21 -0
  2. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h +114 -0
  3. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h +40 -0
  4. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_ops.h +40 -0
  5. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe.h +35 -0
  6. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_compositeimplicitautograd_dispatch.h +24 -0
  7. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_native.h +21 -0
  8. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_ops.h +29 -0
  9. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod.h +40 -0
  10. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod_compositeexplicitautograd_dispatch.h +24 -0
  11. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod_native.h +23 -0
  12. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod_ops.h +40 -0
  13. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum.h +40 -0
  14. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_compositeexplicitautograd_dispatch.h +24 -0
  15. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_native.h +23 -0
  16. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_ops.h +40 -0
  17. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe.h +35 -0
  18. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe_compositeimplicitautograd_dispatch.h +24 -0
  19. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe_native.h +21 -0
  20. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe_ops.h +29 -0
  21. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax.h +50 -0
  22. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data.h +40 -0
  23. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h +24 -0
  24. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_native.h +23 -0
  25. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_ops.h +40 -0
  26. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_compositeexplicitautograd_dispatch.h +24 -0
  27. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h +24 -0
  28. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_native.h +25 -0
  29. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_ops.h +62 -0
  30. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mask_projection.h +35 -0
  31. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mask_projection_compositeexplicitautograd_dispatch.h +24 -0
  32. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mask_projection_native.h +22 -0
  33. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mask_projection_ops.h +40 -0
  34. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm.h +36 -0
  35. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_compositeimplicitautograd_dispatch.h +24 -0
  36. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_native.h +22 -0
  37. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_ops.h +40 -0
  38. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl.h +31 -0
  39. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward.h +31 -0
  40. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_native.h +21 -0
  41. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_ops.h +29 -0
  42. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_native.h +21 -0
  43. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_ops.h +29 -0
  44. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_addmm.h +31 -0
  45. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_addmm_cuda_dispatch.h +23 -0
  46. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_addmm_native.h +21 -0
  47. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_addmm_ops.h +29 -0
  48. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_apply.h +31 -0
  49. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_apply_cuda_dispatch.h +23 -0
  50. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_apply_dense.h +31 -0
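The commit message notes these files were pushed with the upload-large-folder tool, and the paths show the upload swept in the project's `.venv` directory, which is why the changes below are generated PyTorch headers rather than project code. For context, a minimal sketch of that upload flow with `huggingface_hub` (the repo id, repo type, and local path are illustrative placeholders, not values taken from this commit):

```python
from huggingface_hub import HfApi

api = HfApi()
# upload_large_folder chunks and resumes uploads of very large directory
# trees; repo_id, repo_type, and folder_path below are placeholders.
api.upload_large_folder(
    repo_id="username/example-dataset",
    repo_type="dataset",
    folder_path="Scripts_Climate_n_LAI_to_Yield",
)
```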
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_with_dims_native.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor sparse_compressed_tensor_with_dims(int64_t nnz, int64_t dense_dim, at::IntArrayRef size, at::IntArrayRef blocksize, at::ScalarType index_dtype, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
20
+ } // namespace native
21
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h ADDED
@@ -0,0 +1,114 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
27
+ inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, ::std::optional<bool> is_coalesced=::std::nullopt) {
28
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
29
+ }
30
+ namespace symint {
31
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
32
+ at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, ::std::optional<bool> is_coalesced=::std::nullopt) {
33
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
34
+ }
35
+ }
36
+
37
+ // aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
38
+ inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
39
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory, is_coalesced);
40
+ }
41
+ namespace symint {
42
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
43
+ at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
44
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory, is_coalesced);
45
+ }
46
+ }
47
+
48
+ // aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
49
+ inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, ::std::optional<bool> is_coalesced=::std::nullopt) {
50
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
51
+ }
52
+ namespace symint {
53
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
54
+ at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, ::std::optional<bool> is_coalesced=::std::nullopt) {
55
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
56
+ }
57
+ }
58
+
59
+ // aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
60
+ inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
61
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
62
+ }
63
+ namespace symint {
64
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
65
+ at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
66
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
67
+ }
68
+ }
69
+
70
+ // aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
71
+ inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced=::std::nullopt) {
72
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, is_coalesced, out);
73
+ }
74
+ namespace symint {
75
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
76
+ at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced=::std::nullopt) {
77
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, is_coalesced, out);
78
+ }
79
+ }
80
+
81
+ // aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
82
+ inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced, at::Tensor & out) {
83
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, is_coalesced, out);
84
+ }
85
+ namespace symint {
86
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
87
+ at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced, at::Tensor & out) {
88
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, is_coalesced, out);
89
+ }
90
+ }
91
+
92
+ // aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
93
+ inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced=::std::nullopt) {
94
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
95
+ }
96
+ namespace symint {
97
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
98
+ at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced=::std::nullopt) {
99
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
100
+ }
101
+ }
102
+
103
+ // aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
104
+ inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_outf(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced, at::Tensor & out) {
105
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
106
+ }
107
+ namespace symint {
108
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
109
+ at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced, at::Tensor & out) {
110
+ return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
111
+ }
112
+ }
113
+
114
+ }
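The header above declares the internal ATen entry points for building sparse COO tensors from pre-computed indices and values. As a point of reference, a minimal sketch of the public Python constructor that sits on top of ops like this one (shapes and values are illustrative):

```python
import torch

# 2x3 sparse COO tensor from explicit COO indices and values.
indices = torch.tensor([[0, 1, 1],
                        [2, 0, 2]])
values = torch.tensor([3.0, 4.0, 5.0])
sparse = torch.sparse_coo_tensor(indices, values, size=(2, 3))
dense = sparse.to_dense()  # materialize to check the stored elements
```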
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_coo_tensor_with_dims_and_tensors {
19
+ using schema = at::Tensor (int64_t, int64_t, c10::SymIntArrayRef, const at::Tensor &, const at::Tensor &, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, ::std::optional<bool>);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_coo_tensor_with_dims_and_tensors";
23
+ static constexpr const char* overload_name = "";
24
+ static constexpr const char* schema_str = "_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor";
25
+ static at::Tensor call(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced);
27
+ };
28
+
29
+ struct TORCH_API _sparse_coo_tensor_with_dims_and_tensors_out {
30
+ using schema = at::Tensor & (int64_t, int64_t, c10::SymIntArrayRef, const at::Tensor &, const at::Tensor &, ::std::optional<bool>, at::Tensor &);
31
+ using ptr_schema = schema*;
32
+ // See Note [static constexpr char* members for windows NVCC]
33
+ static constexpr const char* name = "aten::_sparse_coo_tensor_with_dims_and_tensors";
34
+ static constexpr const char* overload_name = "out";
35
+ static constexpr const char* schema_str = "_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)";
36
+ static at::Tensor & call(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced, at::Tensor & out);
37
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced, at::Tensor & out);
38
+ };
39
+
40
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_ops.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_coo_tensor_with_dims {
19
+ using schema = at::Tensor (int64_t, int64_t, at::IntArrayRef, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_coo_tensor_with_dims";
23
+ static constexpr const char* overload_name = "";
24
+ static constexpr const char* schema_str = "_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor";
25
+ static at::Tensor call(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
27
+ };
28
+
29
+ struct TORCH_API _sparse_coo_tensor_with_dims_out {
30
+ using schema = at::Tensor & (int64_t, int64_t, at::IntArrayRef, at::Tensor &);
31
+ using ptr_schema = schema*;
32
+ // See Note [static constexpr char* members for windows NVCC]
33
+ static constexpr const char* name = "aten::_sparse_coo_tensor_with_dims";
34
+ static constexpr const char* overload_name = "out";
35
+ static constexpr const char* schema_str = "_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)";
36
+ static at::Tensor & call(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out);
37
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out);
38
+ };
39
+
40
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe.h ADDED
@@ -0,0 +1,35 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_csc_tensor_unsafe_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
27
+ inline at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
28
+ return at::_ops::_sparse_csc_tensor_unsafe::call(ccol_indices, row_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
29
+ }
30
+ // aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
31
+ inline at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
32
+ return at::_ops::_sparse_csc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
33
+ }
34
+
35
+ }
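The `_unsafe` constructor declared above skips invariant checking; the validated public counterpart is `torch.sparse_csc_tensor`. A small illustrative example (sizes and values are made up):

```python
import torch

# 2x3 CSC matrix: ccol_indices has ncols + 1 entries and ends at nnz;
# each column here stores exactly one element.
ccol_indices = torch.tensor([0, 1, 2, 3])
row_indices = torch.tensor([0, 1, 0])
values = torch.tensor([1.0, 2.0, 3.0])
csc = torch.sparse_csc_tensor(ccol_indices, row_indices, values, size=(2, 3))
```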
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={});
21
+ TORCH_API at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
22
+
23
+ } // namespace compositeimplicitautograd
24
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_native.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
20
+ } // namespace native
21
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_ops.h ADDED
@@ -0,0 +1,29 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_csc_tensor_unsafe {
19
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_csc_tensor_unsafe";
23
+ static constexpr const char* overload_name = "";
24
+ static constexpr const char* schema_str = "_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
25
+ static at::Tensor call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
27
+ };
28
+
29
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_csr_prod_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
27
+ inline at::Tensor _sparse_csr_prod(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
28
+ return at::_ops::_sparse_csr_prod_dim_dtype::call(self, dim, keepdim, dtype);
29
+ }
30
+
31
+ // aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
32
+ inline at::Tensor & _sparse_csr_prod_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
33
+ return at::_ops::_sparse_csr_prod_dim_dtype_out::call(self, dim, keepdim, dtype, out);
34
+ }
35
+ // aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
36
+ inline at::Tensor & _sparse_csr_prod_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
37
+ return at::_ops::_sparse_csr_prod_dim_dtype_out::call(self, dim, keepdim, dtype, out);
38
+ }
39
+
40
+ }
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _sparse_csr_prod_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
21
+ TORCH_API at::Tensor & _sparse_csr_prod_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod_native.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _sparse_csr_prod_dim_dtype_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
20
+ TORCH_API at::Tensor _sparse_csr_prod_cpu(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
21
+ TORCH_API at::Tensor _sparse_csr_prod_cuda(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
22
+ } // namespace native
23
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod_ops.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_csr_prod_dim_dtype {
19
+ using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional<at::ScalarType>);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_csr_prod";
23
+ static constexpr const char* overload_name = "dim_dtype";
24
+ static constexpr const char* schema_str = "_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor";
25
+ static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
27
+ };
28
+
29
+ struct TORCH_API _sparse_csr_prod_dim_dtype_out {
30
+ using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional<at::ScalarType>, at::Tensor &);
31
+ using ptr_schema = schema*;
32
+ // See Note [static constexpr char* members for windows NVCC]
33
+ static constexpr const char* name = "aten::_sparse_csr_prod";
34
+ static constexpr const char* overload_name = "dim_dtype_out";
35
+ static constexpr const char* schema_str = "_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)";
36
+ static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
37
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
38
+ };
39
+
40
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_csr_sum_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
27
+ inline at::Tensor _sparse_csr_sum(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
28
+ return at::_ops::_sparse_csr_sum_dim_dtype::call(self, dim, keepdim, dtype);
29
+ }
30
+
31
+ // aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
32
+ inline at::Tensor & _sparse_csr_sum_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
33
+ return at::_ops::_sparse_csr_sum_dim_dtype_out::call(self, dim, keepdim, dtype, out);
34
+ }
35
+ // aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
36
+ inline at::Tensor & _sparse_csr_sum_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
37
+ return at::_ops::_sparse_csr_sum_dim_dtype_out::call(self, dim, keepdim, dtype, out);
38
+ }
39
+
40
+ }
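The header above exposes the dim-wise sum reduction used for sparse CSR tensors. A sketch of the public route, assuming this torch build supports `Tensor.sum(dim=...)` on CSR layout (values are illustrative):

```python
import torch

# 2x3 CSR matrix: row 0 stores two elements, row 1 stores one.
crow_indices = torch.tensor([0, 2, 3])
col_indices = torch.tensor([0, 2, 1])
values = torch.tensor([1.0, 2.0, 3.0])
csr = torch.sparse_csr_tensor(crow_indices, col_indices, values, size=(2, 3))
row_sums = csr.sum(dim=1)  # per-row reduction over stored elements
```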
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _sparse_csr_sum_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
21
+ TORCH_API at::Tensor & _sparse_csr_sum_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_native.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _sparse_csr_sum_dim_dtype_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
20
+ TORCH_API at::Tensor _sparse_csr_sum_cpu(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
21
+ TORCH_API at::Tensor _sparse_csr_sum_cuda(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
22
+ } // namespace native
23
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_ops.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_csr_sum_dim_dtype {
19
+ using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional<at::ScalarType>);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_csr_sum";
23
+ static constexpr const char* overload_name = "dim_dtype";
24
+ static constexpr const char* schema_str = "_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor";
25
+ static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
27
+ };
28
+
29
+ struct TORCH_API _sparse_csr_sum_dim_dtype_out {
30
+ using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional<at::ScalarType>, at::Tensor &);
31
+ using ptr_schema = schema*;
32
+ // See Note [static constexpr char* members for windows NVCC]
33
+ static constexpr const char* name = "aten::_sparse_csr_sum";
34
+ static constexpr const char* overload_name = "dim_dtype_out";
35
+ static constexpr const char* schema_str = "_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)";
36
+ static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
37
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
38
+ };
39
+
40
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe.h ADDED
@@ -0,0 +1,35 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_csr_tensor_unsafe_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
27
+ inline at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
28
+ return at::_ops::_sparse_csr_tensor_unsafe::call(crow_indices, col_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
29
+ }
30
+ // aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
31
+ inline at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
32
+ return at::_ops::_sparse_csr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
33
+ }
34
+
35
+ }
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={});
21
+ TORCH_API at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
22
+
23
+ } // namespace compositeimplicitautograd
24
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe_native.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
20
+ } // namespace native
21
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe_ops.h ADDED
@@ -0,0 +1,29 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_csr_tensor_unsafe {
19
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_csr_tensor_unsafe";
23
+ static constexpr const char* overload_name = "";
24
+ static constexpr const char* schema_str = "_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
25
+ static at::Tensor call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
27
+ };
28
+
29
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_log_softmax_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
27
+ inline at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
28
+ return at::_ops::_sparse_log_softmax_int::call(self, dim, dtype);
29
+ }
30
+
31
+ // aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
32
+ inline at::Tensor _sparse_log_softmax(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
33
+ return at::_ops::_sparse_log_softmax_Dimname::call(self, dim, dtype);
34
+ }
35
+
36
+ // aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
37
+ inline at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
38
+ return at::_ops::_sparse_log_softmax::call(self, dim, half_to_float);
39
+ }
40
+
41
+ // aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
42
+ inline at::Tensor & _sparse_log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
43
+ return at::_ops::_sparse_log_softmax_out::call(self, dim, half_to_float, out);
44
+ }
45
+ // aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
46
+ inline at::Tensor & _sparse_log_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
47
+ return at::_ops::_sparse_log_softmax_out::call(self, dim, half_to_float, out);
48
+ }
49
+
50
+ }
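The header above covers the sparse log-softmax op; its public wrapper is `torch.sparse.log_softmax`, which takes a sparse COO input (example values are illustrative):

```python
import torch

# Log-softmax along dim=1 of a sparse COO tensor; unspecified elements act
# as -inf, so only the stored elements in each row participate.
i = torch.tensor([[0, 0, 1],
                  [0, 1, 1]])
v = torch.tensor([1.0, 2.0, 3.0])
s = torch.sparse_coo_tensor(i, v, size=(2, 2)).coalesce()
out = torch.sparse.log_softmax(s, dim=1)
```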
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_log_softmax_backward_data_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
27
+ inline at::Tensor _sparse_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
28
+ return at::_ops::_sparse_log_softmax_backward_data::call(grad_output, output, dim, self);
29
+ }
30
+
31
+ // aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
32
+ inline at::Tensor & _sparse_log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
33
+ return at::_ops::_sparse_log_softmax_backward_data_out::call(grad_output, output, dim, self, out);
34
+ }
35
+ // aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
36
+ inline at::Tensor & _sparse_log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
37
+ return at::_ops::_sparse_log_softmax_backward_data_out::call(grad_output, output, dim, self, out);
38
+ }
39
+
40
+ }
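Note on the naming in the wrapper pair above (the same pattern recurs in the other Function.h headers added by this commit): both _sparse_log_softmax_backward_data_out and _sparse_log_softmax_backward_data_outf forward to the same at::_ops::_sparse_log_softmax_backward_data_out::call; the `_out` spelling takes the destination tensor as its first argument, while the `_outf` spelling keeps the schema's argument order and takes it last.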
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _sparse_log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self);
21
+ TORCH_API at::Tensor & _sparse_log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_native.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _sparse_log_softmax_backward_data_out(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out);
20
+ TORCH_API at::Tensor log_softmax_backward_sparse_cpu(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self);
21
+ TORCH_API at::Tensor log_softmax_backward_sparse_cuda(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self);
22
+ } // namespace native
23
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_ops.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_log_softmax_backward_data {
19
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_log_softmax_backward_data";
23
+ static constexpr const char* overload_name = "";
24
+ static constexpr const char* schema_str = "_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor";
25
+ static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self);
27
+ };
28
+
29
+ struct TORCH_API _sparse_log_softmax_backward_data_out {
30
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &, at::Tensor &);
31
+ using ptr_schema = schema*;
32
+ // See Note [static constexpr char* members for windows NVCC]
33
+ static constexpr const char* name = "aten::_sparse_log_softmax_backward_data";
34
+ static constexpr const char* overload_name = "out";
35
+ static constexpr const char* schema_str = "_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)";
36
+ static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out);
37
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out);
38
+ };
39
+
40
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _sparse_log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float);
21
+ TORCH_API at::Tensor & _sparse_log_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
21
+ TORCH_API at::Tensor _sparse_log_softmax(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
22
+
23
+ } // namespace compositeimplicitautograd
24
+ } // namespace at
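Taken together with the compositeexplicitautograd header above, this is (roughly) the usual torchgen split: the dtype overloads are exposed under at::compositeimplicitautograd because they decompose into other ops, so autograd comes from the decomposition; the out variants are exposed under at::compositeexplicitautograd; and the per-backend kernels (log_softmax_sparse_cpu / log_softmax_sparse_cuda) are declared in the corresponding _native.h header added below.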
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_native.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
20
+ TORCH_API at::Tensor _sparse_log_softmax(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
21
+ TORCH_API at::Tensor & _sparse_log_softmax_out(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
22
+ TORCH_API at::Tensor log_softmax_sparse_cpu(const at::Tensor & self, int64_t dim, bool half_to_float);
23
+ TORCH_API at::Tensor log_softmax_sparse_cuda(const at::Tensor & self, int64_t dim, bool half_to_float);
24
+ } // namespace native
25
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_ops.h ADDED
@@ -0,0 +1,62 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_log_softmax_int {
19
+ using schema = at::Tensor (const at::Tensor &, int64_t, ::std::optional<at::ScalarType>);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_log_softmax";
23
+ static constexpr const char* overload_name = "int";
24
+ static constexpr const char* schema_str = "_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor";
25
+ static at::Tensor call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype);
27
+ };
28
+
29
+ struct TORCH_API _sparse_log_softmax_Dimname {
30
+ using schema = at::Tensor (const at::Tensor &, at::Dimname, ::std::optional<at::ScalarType>);
31
+ using ptr_schema = schema*;
32
+ // See Note [static constexpr char* members for windows NVCC]
33
+ static constexpr const char* name = "aten::_sparse_log_softmax";
34
+ static constexpr const char* overload_name = "Dimname";
35
+ static constexpr const char* schema_str = "_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor";
36
+ static at::Tensor call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype);
37
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype);
38
+ };
39
+
40
+ struct TORCH_API _sparse_log_softmax {
41
+ using schema = at::Tensor (const at::Tensor &, int64_t, bool);
42
+ using ptr_schema = schema*;
43
+ // See Note [static constexpr char* members for windows NVCC]
44
+ static constexpr const char* name = "aten::_sparse_log_softmax";
45
+ static constexpr const char* overload_name = "";
46
+ static constexpr const char* schema_str = "_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor";
47
+ static at::Tensor call(const at::Tensor & self, int64_t dim, bool half_to_float);
48
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float);
49
+ };
50
+
51
+ struct TORCH_API _sparse_log_softmax_out {
52
+ using schema = at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &);
53
+ using ptr_schema = schema*;
54
+ // See Note [static constexpr char* members for windows NVCC]
55
+ static constexpr const char* name = "aten::_sparse_log_softmax";
56
+ static constexpr const char* overload_name = "out";
57
+ static constexpr const char* schema_str = "_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)";
58
+ static at::Tensor & call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
59
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
60
+ };
61
+
62
+ }} // namespace at::_ops
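These `_ops` descriptors are the schema-level handles that the inline wrappers in _sparse_log_softmax.h (added above) forward to, so the two call paths below are equivalent. This is an illustrative sketch only, assuming a standard libtorch build; it is not part of the added file.

    #include <torch/torch.h>
    #include <ATen/ops/_sparse_log_softmax.h>
    #include <ATen/ops/_sparse_log_softmax_ops.h>

    void log_softmax_two_ways(const at::Tensor& sparse_input) {
      // Convenience wrapper from Function.h ...
      at::Tensor a = at::_sparse_log_softmax(sparse_input, /*dim=*/0);
      // ... which is defined above to forward to the descriptor's static call().
      at::Tensor b = at::_ops::_sparse_log_softmax_int::call(sparse_input, /*dim=*/0, ::std::nullopt);
      TORCH_CHECK(at::allclose(a.to_dense(), b.to_dense()));
    }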
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mask_projection.h ADDED
@@ -0,0 +1,35 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_mask_projection_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)
27
+ inline at::Tensor & _sparse_mask_projection_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches=false) {
28
+ return at::_ops::_sparse_mask_projection_out::call(self, mask, accumulate_matches, out);
29
+ }
30
+ // aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & _sparse_mask_projection_outf(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out) {
32
+ return at::_ops::_sparse_mask_projection_out::call(self, mask, accumulate_matches, out);
33
+ }
34
+
35
+ }
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mask_projection_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _sparse_mask_projection_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches=false);
21
+ TORCH_API at::Tensor & _sparse_mask_projection_outf(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mask_projection_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _sparse_mask_projection_out(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out);
20
+ TORCH_API at::Tensor sparse_mask_projection(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches=false);
21
+ } // namespace native
22
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mask_projection_ops.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_mask_projection {
19
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_mask_projection";
23
+ static constexpr const char* overload_name = "";
24
+ static constexpr const char* schema_str = "_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor";
25
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches);
27
+ };
28
+
29
+ struct TORCH_API _sparse_mask_projection_out {
30
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &);
31
+ using ptr_schema = schema*;
32
+ // See Note [static constexpr char* members for windows NVCC]
33
+ static constexpr const char* name = "aten::_sparse_mask_projection";
34
+ static constexpr const char* overload_name = "out";
35
+ static constexpr const char* schema_str = "_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)";
36
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out);
37
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out);
38
+ };
39
+
40
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm.h ADDED
@@ -0,0 +1,36 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_mm_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor
27
+ inline at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense) {
28
+ return at::_ops::_sparse_mm::call(sparse, dense);
29
+ }
30
+
31
+ // aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
32
+ inline at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
33
+ return at::_ops::_sparse_mm_reduce::call(sparse, dense, reduce);
34
+ }
35
+
36
+ }
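As declared above, at::_sparse_mm multiplies a sparse matrix by a dense one (the `.reduce` overload additionally takes a reduction string, per its schema). The following is a minimal, illustrative sketch, not part of the added file, assuming a standard libtorch build:

    #include <iostream>
    #include <torch/torch.h>

    int main() {
      // 2 x 3 sparse COO matrix times a 3 x 2 dense matrix -> 2 x 2 dense result.
      auto indices = torch::tensor({{0, 1}, {2, 0}});
      auto values  = torch::tensor({1.5f, -2.0f});
      auto sparse  = torch::sparse_coo_tensor(indices, values, {2, 3}).coalesce();
      auto dense   = torch::randn({3, 2});
      std::cout << at::_sparse_mm(sparse, dense) << std::endl;  // wrapper declared above
      return 0;
    }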
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense);
21
+ TORCH_API at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce);
22
+
23
+ } // namespace compositeimplicitautograd
24
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_native.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense);
20
+ TORCH_API at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce);
21
+ } // namespace native
22
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_ops.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_mm {
19
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_mm";
23
+ static constexpr const char* overload_name = "";
24
+ static constexpr const char* schema_str = "_sparse_mm(Tensor sparse, Tensor dense) -> Tensor";
25
+ static at::Tensor call(const at::Tensor & sparse, const at::Tensor & dense);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sparse, const at::Tensor & dense);
27
+ };
28
+
29
+ struct TORCH_API _sparse_mm_reduce {
30
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::string_view);
31
+ using ptr_schema = schema*;
32
+ // See Note [static constexpr char* members for windows NVCC]
33
+ static constexpr const char* name = "aten::_sparse_mm";
34
+ static constexpr const char* overload_name = "reduce";
35
+ static constexpr const char* schema_str = "_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor";
36
+ static at::Tensor call(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce);
37
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce);
38
+ };
39
+
40
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl.h ADDED
@@ -0,0 +1,31 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_mm_reduce_impl_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
27
+ inline ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
28
+ return at::_ops::_sparse_mm_reduce_impl::call(self, other, reduce);
29
+ }
30
+
31
+ }
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward.h ADDED
@@ -0,0 +1,31 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_mm_reduce_impl_backward_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
27
+ inline ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
28
+ return at::_ops::_sparse_mm_reduce_impl_backward::call(self, grad_out, weight, reduce, arg_out, output_mask);
29
+ }
30
+
31
+ }
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_native.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward_sparse_csr_cpu(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask);
20
+ } // namespace native
21
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_ops.h ADDED
@@ -0,0 +1,29 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_mm_reduce_impl_backward {
19
+ using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const at::Tensor &, ::std::array<bool,2>);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_mm_reduce_impl_backward";
23
+ static constexpr const char* overload_name = "";
24
+ static constexpr const char* schema_str = "_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)";
25
+ static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask);
26
+ static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask);
27
+ };
28
+
29
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_native.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_sparse_csr_cpu(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce);
20
+ } // namespace native
21
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_ops.h ADDED
@@ -0,0 +1,29 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_mm_reduce_impl {
19
+ using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, c10::string_view);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_mm_reduce_impl";
23
+ static constexpr const char* overload_name = "";
24
+ static constexpr const char* schema_str = "_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)";
25
+ static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce);
26
+ static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::string_view reduce);
27
+ };
28
+
29
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_addmm.h ADDED
@@ -0,0 +1,31 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_semi_structured_addmm_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_semi_structured_addmm(Tensor input, Tensor mat1, Tensor mat1_meta, Tensor mat2, *, Scalar alpha=1, Scalar beta=1, ScalarType? out_dtype=None) -> Tensor
27
+ inline at::Tensor _sparse_semi_structured_addmm(const at::Tensor & input, const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, const at::Scalar & alpha=1, const at::Scalar & beta=1, ::std::optional<at::ScalarType> out_dtype=::std::nullopt) {
28
+ return at::_ops::_sparse_semi_structured_addmm::call(input, mat1, mat1_meta, mat2, alpha, beta, out_dtype);
29
+ }
30
+
31
+ }
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_addmm_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor _sparse_semi_structured_addmm(const at::Tensor & input, const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, const at::Scalar & alpha=1, const at::Scalar & beta=1, ::std::optional<at::ScalarType> out_dtype=::std::nullopt);
21
+
22
+ } // namespace cuda
23
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_addmm_native.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _sparse_semi_structured_addmm(const at::Tensor & input, const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, const at::Scalar & alpha=1, const at::Scalar & beta=1, ::std::optional<at::ScalarType> out_dtype=::std::nullopt);
20
+ } // namespace native
21
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_addmm_ops.h ADDED
@@ -0,0 +1,29 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API _sparse_semi_structured_addmm {
19
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, ::std::optional<at::ScalarType>);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::_sparse_semi_structured_addmm";
23
+ static constexpr const char* overload_name = "";
24
+ static constexpr const char* schema_str = "_sparse_semi_structured_addmm(Tensor input, Tensor mat1, Tensor mat1_meta, Tensor mat2, *, Scalar alpha=1, Scalar beta=1, ScalarType? out_dtype=None) -> Tensor";
25
+ static at::Tensor call(const at::Tensor & input, const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, const at::Scalar & alpha, const at::Scalar & beta, ::std::optional<at::ScalarType> out_dtype);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, const at::Scalar & alpha, const at::Scalar & beta, ::std::optional<at::ScalarType> out_dtype);
27
+ };
28
+
29
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_apply.h ADDED
@@ -0,0 +1,31 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_semi_structured_apply_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_semi_structured_apply(Tensor input, Tensor thread_masks) -> (Tensor, Tensor)
27
+ inline ::std::tuple<at::Tensor,at::Tensor> _sparse_semi_structured_apply(const at::Tensor & input, const at::Tensor & thread_masks) {
28
+ return at::_ops::_sparse_semi_structured_apply::call(input, thread_masks);
29
+ }
30
+
31
+ }
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_apply_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> _sparse_semi_structured_apply(const at::Tensor & input, const at::Tensor & thread_masks);
21
+
22
+ } // namespace cuda
23
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_apply_dense.h ADDED
@@ -0,0 +1,31 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+ #include <string_view>
18
+
19
+
20
+
21
+ #include <ATen/ops/_sparse_semi_structured_apply_dense_ops.h>
22
+
23
+ namespace at {
24
+
25
+
26
+ // aten::_sparse_semi_structured_apply_dense(Tensor input, Tensor thread_masks) -> Tensor
27
+ inline at::Tensor _sparse_semi_structured_apply_dense(const at::Tensor & input, const at::Tensor & thread_masks) {
28
+ return at::_ops::_sparse_semi_structured_apply_dense::call(input, thread_masks);
29
+ }
30
+
31
+ }