jonghanko committed
Commit a675c4e · verified · 1 Parent(s): 8fa282a

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See raw diff
Files changed (50)
  1. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_gru_cell_ops.h +29 -0
  2. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_lstm_cell_ops.h +29 -0
  3. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool1d_compositeexplicitautograd_dispatch.h +24 -0
  4. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool1d_native.h +22 -0
  5. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h +24 -0
  6. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool3d_ops.h +40 -0
  7. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell.h +31 -0
  8. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_compositeimplicitautograd_dispatch.h +23 -0
  9. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_ops.h +29 -0
  10. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_tanh_cell.h +31 -0
  11. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_tanh_cell_compositeimplicitautograd_dispatch.h +23 -0
  12. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_tanh_cell_native.h +21 -0
  13. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_tanh_cell_ops.h +29 -0
  14. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg.h +45 -0
  15. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg_compositeexplicitautograd_dispatch.h +26 -0
  16. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg_native.h +29 -0
  17. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg_ops.h +51 -0
  18. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand.h +378 -0
  19. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_compositeexplicitautograd_dispatch.h +50 -0
  20. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_compositeimplicitautograd_dispatch.h +26 -0
  21. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like.h +44 -0
  22. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_compositeexplicitautograd_dispatch.h +26 -0
  23. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_native.h +22 -0
  24. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_ops.h +40 -0
  25. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_native.h +28 -0
  26. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_ops.h +106 -0
  27. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint.h +378 -0
  28. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_compositeexplicitautograd_dispatch.h +54 -0
  29. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like.h +220 -0
  30. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_compositeexplicitautograd_dispatch.h +42 -0
  31. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_native.h +26 -0
  32. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_ops.h +84 -0
  33. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_native.h +28 -0
  34. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_ops.h +106 -0
  35. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn.h +378 -0
  36. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeexplicitautograd_dispatch.h +46 -0
  37. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeimplicitautograd_dispatch.h +30 -0
  38. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like.h +44 -0
  39. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like_compositeexplicitautograd_dispatch.h +26 -0
  40. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like_compositeimplicitautogradnestedtensor_dispatch.h +24 -0
  41. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like_native.h +22 -0
  42. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like_ops.h +40 -0
  43. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_native.h +28 -0
  44. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_ops.h +106 -0
  45. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random.h +68 -0
  46. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_compositeexplicitautograd_dispatch.h +31 -0
  47. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_cpu_dispatch.h +25 -0
  48. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_cuda_dispatch.h +25 -0
  49. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_meta_dispatch.h +25 -0
  50. Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_native.h +32 -0
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_gru_cell_ops.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <string_view>
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API quantized_gru_cell {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, const at::Scalar &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::quantized_gru_cell";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor";
+ static at::Tensor call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+ };
+
+ }} // namespace at::_ops
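
Note: every *_ops.h header in this commit follows the same torchgen Operator.h template as the file above: one struct per operator overload, carrying the dispatcher name, overload_name, and schema string as static constexpr members alongside static call/redispatch entry points. A minimal sketch of what that exposes (an illustration assuming a libtorch build with these headers on the include path; not part of the diff):

    #include <ATen/ops/quantized_gru_cell_ops.h>
    #include <iostream>

    int main() {
      // The generated struct carries the operator's dispatcher metadata.
      std::cout << at::_ops::quantized_gru_cell::name << "\n";        // aten::quantized_gru_cell
      std::cout << at::_ops::quantized_gru_cell::schema_str << "\n";  // full ATen schema string
      return 0;
    }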
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_lstm_cell_ops.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <string_view>
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API quantized_lstm_cell {
+ using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::TensorList, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, const at::Scalar &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::quantized_lstm_cell";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)";
+ static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+ static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+ };
+
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool1d_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor & quantized_max_pool1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+ TORCH_API at::Tensor & quantized_max_pool1d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool1d_native.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor & quantized_max_pool1d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+ TORCH_API at::Tensor quantized_max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+ } // namespace native
+ } // namespace at
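
The native header above is the backend-facing declaration of the functional form, with stride, padding, and dilation defaulted. A short usage sketch through the public at:: API (hedged: the op needs a quantized input, built here with at::quantize_per_tensor, which is not part of this diff, and the scale/zero_point values are illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::rand({1, 3, 8});  // N x C x L float input
      // Quantize to quint8 before pooling.
      at::Tensor q = at::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/0, at::kQUInt8);
      // stride defaults to kernel_size when left empty.
      at::Tensor y = at::quantized_max_pool1d(q, /*kernel_size=*/{2});
      return 0;
    }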
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor & quantized_max_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+ TORCH_API at::Tensor & quantized_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool3d_ops.h ADDED
@@ -0,0 +1,40 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <string_view>
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API quantized_max_pool3d {
+ using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::quantized_max_pool3d";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor";
+ static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode);
+ };
+
+ struct TORCH_API quantized_max_pool3d_out {
+ using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::quantized_max_pool3d";
+ static constexpr const char* overload_name = "out";
+ static constexpr const char* schema_str = "quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell.h ADDED
@@ -0,0 +1,31 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <string_view>
+
+
+
+ #include <ATen/ops/quantized_rnn_relu_cell_ops.h>
+
+ namespace at {
+
+
+ // aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
+ inline at::Tensor quantized_rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
+ return at::_ops::quantized_rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+ }
+
+ }
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor quantized_rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_ops.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <string_view>
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API quantized_rnn_relu_cell {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, const at::Scalar &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::quantized_rnn_relu_cell";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor";
+ static at::Tensor call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+ };
+
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_tanh_cell.h ADDED
@@ -0,0 +1,31 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <string_view>
+
+
+
+ #include <ATen/ops/quantized_rnn_tanh_cell_ops.h>
+
+ namespace at {
+
+
+ // aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
+ inline at::Tensor quantized_rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
+ return at::_ops::quantized_rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+ }
+
+ }
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_tanh_cell_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor quantized_rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_tanh_cell_native.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor quantized_rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+ } // namespace native
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_tanh_cell_ops.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <string_view>
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API quantized_rnn_tanh_cell {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, const at::Scalar &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::quantized_rnn_tanh_cell";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor";
+ static at::Tensor call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
+ };
+
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg.h ADDED
@@ -0,0 +1,45 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <string_view>
+
+
+
+ #include <ATen/ops/rad2deg_ops.h>
+
+ namespace at {
+
+
+ // aten::rad2deg(Tensor self) -> Tensor
+ inline at::Tensor rad2deg(const at::Tensor & self) {
+ return at::_ops::rad2deg::call(self);
+ }
+
+ // aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
+ inline at::Tensor & rad2deg_(at::Tensor & self) {
+ return at::_ops::rad2deg_::call(self);
+ }
+
+ // aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::rad2deg_out::call(self, out);
+ }
+ // aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::rad2deg_out::call(self, out);
+ }
+
+ }
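
rad2deg.h above is the public Function.h surface: thin inline wrappers that forward to the at::_ops structs declared in rad2deg_ops.h. A brief usage sketch of the entry points it declares (an illustration, assuming a standard libtorch setup):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor r = at::full({1}, 3.141592653589793);  // pi radians
      at::Tensor d = at::rad2deg(r);                    // functional form, ~180 degrees
      at::Tensor out = at::empty_like(r);
      at::rad2deg_out(out, r);                          // out= variant writes into `out`
      at::rad2deg_(r);                                  // in-place variant mutates `r`
      return 0;
    }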
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor rad2deg(const at::Tensor & self);
+ TORCH_API at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self);
+ TORCH_API at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out);
+ TORCH_API at::Tensor & rad2deg_(at::Tensor & self);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg_native.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor rad2deg(const at::Tensor & self);
+ TORCH_API at::Tensor & rad2deg_out(const at::Tensor & self, at::Tensor & out);
+ TORCH_API at::Tensor & rad2deg_(at::Tensor & self);
+ TORCH_API at::Tensor rad2deg_sparse(const at::Tensor & self);
+ TORCH_API at::Tensor & rad2deg_sparse_out(const at::Tensor & self, at::Tensor & out);
+ TORCH_API at::Tensor & rad2deg_sparse_(at::Tensor & self);
+ TORCH_API at::Tensor rad2deg_sparse_csr(const at::Tensor & self);
+ TORCH_API at::Tensor & rad2deg_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+ TORCH_API at::Tensor & rad2deg_sparse_csr_(at::Tensor & self);
+ } // namespace native
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg_ops.h ADDED
@@ -0,0 +1,51 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <string_view>
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API rad2deg {
+ using schema = at::Tensor (const at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rad2deg";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "rad2deg(Tensor self) -> Tensor";
+ static at::Tensor call(const at::Tensor & self);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+ };
+
+ struct TORCH_API rad2deg_ {
+ using schema = at::Tensor & (at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rad2deg_";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "rad2deg_(Tensor(a!) self) -> Tensor(a!)";
+ static at::Tensor & call(at::Tensor & self);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+ };
+
+ struct TORCH_API rad2deg_out {
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rad2deg";
+ static constexpr const char* overload_name = "out";
+ static constexpr const char* schema_str = "rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand.h ADDED
@@ -0,0 +1,378 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <string_view>
+
+
+
+ #include <ATen/ops/rand_ops.h>
+
+ namespace at {
+
+
+ // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor rand(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor rand(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::rand_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor rand(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::rand_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_names::call(size, names, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor rand(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_names::call(size, names, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::rand_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor rand(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::rand_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor rand(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand(at::IntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor rand(at::IntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor rand(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::rand::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor rand(c10::SymIntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::rand::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand::call(size, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor rand(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand::call(size, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) {
+ return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) {
+ return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) {
+ return at::_ops::rand_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor rand(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) {
+ return at::_ops::rand_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_generator::call(size, generator, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor rand(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::rand_generator::call(size, generator, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size) {
+ return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size) {
+ return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out);
+ }
+ }
+
+ // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor & out) {
+ return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor & out) {
+ return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out);
+ }
+ }
+
+ // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
+ return at::_ops::rand_out::call(size, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size) {
+ return at::_ops::rand_out::call(size, out);
+ }
+ }
+
+ // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
+ return at::_ops::rand_out::call(size, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & rand_outf(c10::SymIntArrayRef size, at::Tensor & out) {
+ return at::_ops::rand_out::call(size, out);
+ }
+ }
+
+ // aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ }
+
+ // aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ }
+
+ // aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::rand_generator_out::call(size, generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::rand_generator_out::call(size, generator, out);
+ }
+ }
+
+ // aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::rand_generator_out::call(size, generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & rand_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::rand_generator_out::call(size, generator, out);
+ }
+ }
+
+ // aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::DimnameList> names) {
+ return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::DimnameList> names) {
+ return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
298
+ }
299
+ }
300
+
301
+ // aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
302
+ inline at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
303
+ return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
304
+ }
305
+ namespace symint {
306
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
307
+ at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
308
+ return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
309
+ }
310
+ }
311
+
312
+ // aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
313
+ inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names) {
314
+ return at::_ops::rand_names_out::call(size, names, out);
315
+ }
316
+ namespace symint {
317
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
318
+ at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names) {
319
+ return at::_ops::rand_names_out::call(size, names, out);
320
+ }
321
+ }
322
+
323
+ // aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
324
+ inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
325
+ return at::_ops::rand_names_out::call(size, names, out);
326
+ }
327
+ namespace symint {
328
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
329
+ at::Tensor & rand_outf(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
330
+ return at::_ops::rand_names_out::call(size, names, out);
331
+ }
332
+ }
333
+
334
+ // aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
335
+ inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) {
336
+ return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
337
+ }
338
+ namespace symint {
339
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
340
+ at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) {
341
+ return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
342
+ }
343
+ }
344
+
345
+ // aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
346
+ inline at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
347
+ return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
348
+ }
349
+ namespace symint {
350
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
351
+ at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
352
+ return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
353
+ }
354
+ }
355
+
356
+ // aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
357
+ inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) {
358
+ return at::_ops::rand_generator_with_names_out::call(size, generator, names, out);
359
+ }
360
+ namespace symint {
361
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
362
+ at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) {
363
+ return at::_ops::rand_generator_with_names_out::call(size, generator, names, out);
364
+ }
365
+ }
366
+
367
+ // aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
368
+ inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
369
+ return at::_ops::rand_generator_with_names_out::call(size, generator, names, out);
370
+ }
371
+ namespace symint {
372
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
373
+ at::Tensor & rand_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
374
+ return at::_ops::rand_generator_with_names_out::call(size, generator, names, out);
375
+ }
376
+ }
377
+
378
+ }
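The wrappers above are thin forwards to at::_ops::rand_*::call. A minimal usage sketch of the public overloads (illustrative only; the shapes, dtype, and out tensor here are assumptions, not part of the generated header):

#include <ATen/ATen.h>

int main() {
  // Factory overload: uniform [0, 1) values, options supplied explicitly.
  at::Tensor a = at::rand({2, 3}, at::TensorOptions().dtype(at::kFloat));
  // Out-variant: writes into a preallocated tensor via at::_ops::rand_out.
  at::Tensor out = at::empty({2, 3});
  at::rand_out(out, {2, 3});
  return 0;
}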
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,50 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={});
+ TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={});
+ TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::DimnameList> names);
+ TORCH_API at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ TORCH_API at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names);
+ TORCH_API at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={});
+ TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={});
+ TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names);
+ TORCH_API at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ TORCH_API at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names);
+ TORCH_API at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ TORCH_API at::Tensor rand(at::IntArrayRef size, at::TensorOptions options={});
+ TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options={});
+ TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size);
+ TORCH_API at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size);
+ TORCH_API at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={});
+ TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={});
+ TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator);
+ TORCH_API at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator);
+ TORCH_API at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <string_view>
+
+
+
+ #include <ATen/ops/rand_like_ops.h>
+
+ namespace at {
+
+
+ // aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor rand_like(const at::Tensor & self, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::rand_like::call(self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ // aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor rand_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::rand_like::call(self, dtype, layout, device, pin_memory, memory_format);
+ }
+
+ // aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_like_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::rand_like_out::call(self, memory_format, out);
+ }
+ // aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_like_outf(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::rand_like_out::call(self, memory_format, out);
+ }
+
+ }
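A minimal rand_like sketch (illustrative, not part of the generated header): rand_like samples uniform [0, 1) values while taking shape, dtype, device, and layout from an existing tensor, with optional overrides.

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::empty({4, 4}, at::kFloat);
  // Same metadata as t, fresh uniform values.
  at::Tensor u = at::rand_like(t);
  // dtype override via the TensorOptions overload (ScalarType converts implicitly).
  at::Tensor v = at::rand_like(t, at::kDouble);
  return 0;
}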
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor rand_like(const at::Tensor & self, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor rand_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ TORCH_API at::Tensor & rand_like_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & rand_like_outf(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_native.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor rand_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & rand_like_out(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ } // namespace native
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_like_ops.h ADDED
@@ -0,0 +1,40 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <string_view>
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API rand_like {
+ using schema = at::Tensor (const at::Tensor &, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, ::std::optional<at::MemoryFormat>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rand_like";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor";
+ static at::Tensor call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ };
+
+ struct TORCH_API rand_like_out {
+ using schema = at::Tensor & (const at::Tensor &, ::std::optional<at::MemoryFormat>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rand_like";
+ static constexpr const char* overload_name = "out";
+ static constexpr const char* schema_str = "rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
@@ -0,0 +1,28 @@
 
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ TORCH_API at::Tensor & rand_names_out_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ TORCH_API at::Tensor & rand_generator_with_names_out_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ TORCH_API at::Tensor & rand_out(at::IntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor & rand_out(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ } // namespace native
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/rand_ops.h ADDED
@@ -0,0 +1,106 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <string_view>
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API rand_names {
+ using schema = at::Tensor (c10::SymIntArrayRef, ::std::optional<at::DimnameList>, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rand";
+ static constexpr const char* overload_name = "names";
+ static constexpr const char* schema_str = "rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+ static at::Tensor call(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ };
+
+ struct TORCH_API rand_generator_with_names {
+ using schema = at::Tensor (c10::SymIntArrayRef, ::std::optional<at::Generator>, ::std::optional<at::DimnameList>, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rand";
+ static constexpr const char* overload_name = "generator_with_names";
+ static constexpr const char* schema_str = "rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+ static at::Tensor call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ };
+
+ struct TORCH_API rand {
+ using schema = at::Tensor (c10::SymIntArrayRef, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rand";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+ static at::Tensor call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ };
+
+ struct TORCH_API rand_generator {
+ using schema = at::Tensor (c10::SymIntArrayRef, ::std::optional<at::Generator>, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rand";
+ static constexpr const char* overload_name = "generator";
+ static constexpr const char* schema_str = "rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+ static at::Tensor call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ };
+
+ struct TORCH_API rand_out {
+ using schema = at::Tensor & (c10::SymIntArrayRef, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rand";
+ static constexpr const char* overload_name = "out";
+ static constexpr const char* schema_str = "rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymIntArrayRef size, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out);
+ };
+
+ struct TORCH_API rand_generator_out {
+ using schema = at::Tensor & (c10::SymIntArrayRef, ::std::optional<at::Generator>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rand";
+ static constexpr const char* overload_name = "generator_out";
+ static constexpr const char* schema_str = "rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ };
+
+ struct TORCH_API rand_names_out {
+ using schema = at::Tensor & (c10::SymIntArrayRef, ::std::optional<at::DimnameList>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rand";
+ static constexpr const char* overload_name = "names_out";
+ static constexpr const char* schema_str = "rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ };
+
+ struct TORCH_API rand_generator_with_names_out {
+ using schema = at::Tensor & (c10::SymIntArrayRef, ::std::optional<at::Generator>, ::std::optional<at::DimnameList>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::rand";
+ static constexpr const char* overload_name = "generator_with_names_out";
+ static constexpr const char* schema_str = "rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
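These Operator.h structs carry the dispatcher identity of each overload (name, overload_name, schema_str) alongside its typed call/redispatch entry points. A sketch of looking one up dynamically (assumes the standard c10::Dispatcher API; illustrative only, not part of the generated file):

#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/ops/rand_ops.h>

void inspect_rand_generator() {
  // findSchemaOrThrow takes the operator name and overload name,
  // exactly the strings stored on at::_ops::rand_generator.
  auto handle = c10::Dispatcher::singleton().findSchemaOrThrow(
      at::_ops::rand_generator::name, at::_ops::rand_generator::overload_name);
  TORCH_CHECK(handle.hasSchema());
}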
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint.h ADDED
@@ -0,0 +1,378 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <string_view>
+
+
+
+ #include <ATen/ops/randint_ops.h>
+
+ namespace at {
+
+
+ // aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint::call(high, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint::call(high, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint::call(high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint::call(high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint::call(high, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint(c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint::call(high, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint::call(high, size, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint::call(high, size, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_generator::call(high, c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_generator::call(high, c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_generator::call(high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_generator::call(high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_generator::call(high, size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_generator::call(high, size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_generator::call(high, size, generator, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_generator::call(high, size, generator, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_low::call(low, high, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_low::call(low, high, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_low::call(low, high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_low::call(low, high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_low::call(low, high, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_low::call(low, high, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_low::call(low, high, size, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_low::call(low, high, size, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_low_generator::call(low, high, c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_low_generator::call(low, high, c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_low_generator::call(low, high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_low_generator::call(low, high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_low_generator::call(low, high, size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
+ return at::_ops::randint_low_generator::call(low, high, size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_low_generator::call(low, high, size, generator, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randint_low_generator::call(low, high, size, generator, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size) {
+ return at::_ops::randint_out::call(high, c10::fromIntArrayRefSlow(size), out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size) {
+ return at::_ops::randint_out::call(high, c10::fromIntArrayRefSlow(size), out);
+ }
+ }
+
+ // aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, at::Tensor & out) {
+ return at::_ops::randint_out::call(high, c10::fromIntArrayRefSlow(size), out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, at::Tensor & out) {
+ return at::_ops::randint_out::call(high, c10::fromIntArrayRefSlow(size), out);
+ }
+ }
+
+ // aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt high, c10::SymIntArrayRef size) {
+ return at::_ops::randint_out::call(high, size, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randint_out(at::Tensor & out, c10::SymInt high, c10::SymIntArrayRef size) {
+ return at::_ops::randint_out::call(high, size, out);
+ }
+ }
+
+ // aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_symint_outf(c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
+ return at::_ops::randint_out::call(high, size, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randint_outf(c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
+ return at::_ops::randint_out::call(high, size, out);
+ }
+ }
+
+ // aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::randint_generator_out::call(high, c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::randint_generator_out::call(high, c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ }
+
+ // aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::randint_generator_out::call(high, c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::randint_generator_out::call(high, c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ }
+
+ // aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::randint_generator_out::call(high, size, generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randint_out(at::Tensor & out, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::randint_generator_out::call(high, size, generator, out);
+ }
+ }
+
+ // aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_symint_outf(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::randint_generator_out::call(high, size, generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randint_outf(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::randint_generator_out::call(high, size, generator, out);
+ }
+ }
+
+ // aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size) {
+ return at::_ops::randint_low_out::call(low, high, c10::fromIntArrayRefSlow(size), out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size) {
+ return at::_ops::randint_low_out::call(low, high, c10::fromIntArrayRefSlow(size), out);
+ }
+ }
+
+ // aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out) {
+ return at::_ops::randint_low_out::call(low, high, c10::fromIntArrayRefSlow(size), out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out) {
+ return at::_ops::randint_low_out::call(low, high, c10::fromIntArrayRefSlow(size), out);
+ }
+ }
+
+ // aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size) {
+ return at::_ops::randint_low_out::call(low, high, size, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randint_out(at::Tensor & out, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size) {
+ return at::_ops::randint_low_out::call(low, high, size, out);
+ }
+ }
+
+ // aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_symint_outf(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
+ return at::_ops::randint_low_out::call(low, high, size, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randint_outf(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
+ return at::_ops::randint_low_out::call(low, high, size, out);
+ }
+ }
+
+ // aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
335
+ inline at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator) {
336
+ return at::_ops::randint_low_generator_out::call(low, high, c10::fromIntArrayRefSlow(size), generator, out);
337
+ }
338
+ namespace symint {
339
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
340
+ at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator) {
341
+ return at::_ops::randint_low_generator_out::call(low, high, c10::fromIntArrayRefSlow(size), generator, out);
342
+ }
343
+ }
344
+
345
+ // aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
346
+ inline at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
347
+ return at::_ops::randint_low_generator_out::call(low, high, c10::fromIntArrayRefSlow(size), generator, out);
348
+ }
349
+ namespace symint {
350
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
351
+ at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
352
+ return at::_ops::randint_low_generator_out::call(low, high, c10::fromIntArrayRefSlow(size), generator, out);
353
+ }
354
+ }
355
+
356
+ // aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
357
+ inline at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator) {
358
+ return at::_ops::randint_low_generator_out::call(low, high, size, generator, out);
359
+ }
360
+ namespace symint {
361
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
362
+ at::Tensor & randint_out(at::Tensor & out, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator) {
363
+ return at::_ops::randint_low_generator_out::call(low, high, size, generator, out);
364
+ }
365
+ }
366
+
367
+ // aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
368
+ inline at::Tensor & randint_symint_outf(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
369
+ return at::_ops::randint_low_generator_out::call(low, high, size, generator, out);
370
+ }
371
+ namespace symint {
372
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
373
+ at::Tensor & randint_outf(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
374
+ return at::_ops::randint_low_generator_out::call(low, high, size, generator, out);
375
+ }
376
+ }
377
+
378
+ }
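
For reference, a minimal usage sketch of the out-variant wrappers declared in randint.h above, assuming libtorch is linked and the <ATen/ATen.h> umbrella header is available (both spellings route to the same aten::randint.out overload; only the position of `out` differs):

#include <ATen/ATen.h>

int main() {
  // Pre-allocated destination tensor; randint writes into it in place.
  at::Tensor out = at::empty({2, 3}, at::kLong);
  at::randint_out(out, /*high=*/10, {2, 3});    // out-first wrapper
  at::randint_outf(/*high=*/10, {2, 3}, out);   // out-last ("f") wrapper
  return 0;
}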
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,54 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong);
+ TORCH_API at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong);
+ TORCH_API at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size);
+ TORCH_API at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt high, c10::SymIntArrayRef size);
+ TORCH_API at::Tensor & randint_symint_outf(c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong);
+ TORCH_API at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong);
+ TORCH_API at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator);
+ TORCH_API at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator);
+ TORCH_API at::Tensor & randint_symint_outf(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong);
+ TORCH_API at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong);
+ TORCH_API at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size);
+ TORCH_API at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size);
+ TORCH_API at::Tensor & randint_symint_outf(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong);
+ TORCH_API at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options=at::kLong);
+ TORCH_API at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator);
+ TORCH_API at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator);
+ TORCH_API at::Tensor & randint_symint_outf(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
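
These declarations live in the at::compositeexplicitautograd namespace rather than at::; they name the kernel that the dispatcher selects for the CompositeExplicitAutograd key. A hedged sketch of calling one directly, which skips dispatch (ordinary code should prefer at::randint):

#include <ATen/ATen.h>
#include <ATen/ops/randint_compositeexplicitautograd_dispatch.h>

at::Tensor make_ids() {
  // Invokes the CompositeExplicitAutograd kernel directly, bypassing the
  // dispatcher; at::kLong converts implicitly to at::TensorOptions.
  return at::compositeexplicitautograd::randint(/*high=*/10, {4}, at::kLong);
}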
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like.h ADDED
@@ -0,0 +1,220 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <string_view>
+
+
+
+ #include <ATen/ops/randint_like_ops.h>
+
+ namespace at {
+
+
+ // aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like::call(self, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like::call(self, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ }
+
+ // aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randint_like(const at::Tensor & self, int64_t high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint_like(const at::Tensor & self, int64_t high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
+ }
+ }
+
+ // aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like::call(self, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint_like(const at::Tensor & self, c10::SymInt high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like::call(self, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ }
+
+ // aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint_like(const at::Tensor & self, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
+ }
+ }
+
+ // aten::randint_like.Tensor(Tensor self, Tensor high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randint_like(const at::Tensor & self, const at::Tensor & high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_Tensor::call(self, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ // aten::randint_like.Tensor(Tensor self, Tensor high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randint_like(const at::Tensor & self, const at::Tensor & high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::randint_like_Tensor::call(self, high, dtype, layout, device, pin_memory, memory_format);
+ }
+
+ // aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_low_dtype::call(self, low, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_low_dtype::call(self, low, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ }
+
+ // aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
+ }
+ }
+
+ // aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_low_dtype::call(self, low, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint_like(const at::Tensor & self, c10::SymInt low, c10::SymInt high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_low_dtype::call(self, low, high, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ }
+
+ // aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randint_like(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
+ }
+ }
+
+ // aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_out::call(self, high, memory_format, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_out::call(self, high, memory_format, out);
+ }
+ }
+
+ // aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_outf(const at::Tensor & self, int64_t high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randint_like_out::call(self, high, memory_format, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randint_like_outf(const at::Tensor & self, int64_t high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randint_like_out::call(self, high, memory_format, out);
+ }
+ }
+
+ // aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_out::call(self, high, memory_format, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_out::call(self, high, memory_format, out);
+ }
+ }
+
+ // aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_symint_outf(const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randint_like_out::call(self, high, memory_format, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randint_like_outf(const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randint_like_out::call(self, high, memory_format, out);
+ }
+ }
+
+ // aten::randint_like.Tensor_out(Tensor self, Tensor high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_Tensor_out::call(self, high, memory_format, out);
+ }
+ // aten::randint_like.Tensor_out(Tensor self, Tensor high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_outf(const at::Tensor & self, const at::Tensor & high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randint_like_Tensor_out::call(self, high, memory_format, out);
+ }
+
+ // aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t low, int64_t high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t low, int64_t high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+ }
+ }
+
+ // aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_outf(const at::Tensor & self, int64_t low, int64_t high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randint_like_outf(const at::Tensor & self, int64_t low, int64_t high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+ }
+ }
+
+ // aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+ }
+ }
+
+ // aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_symint_outf(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randint_like_outf(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+ }
+ }
+
+ }
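
A short usage sketch of the randint_like family above (assuming <ATen/ATen.h>): the output takes its shape, dtype, device, and layout from the reference tensor unless overridden via TensorOptions:

#include <ATen/ATen.h>

void randint_like_demo(const at::Tensor& ref) {
  // Same shape/dtype/device as `ref`, values drawn uniformly from [0, 5).
  at::Tensor a = at::randint_like(ref, /*high=*/5);
  // low_dtype overload: values drawn uniformly from [-2, 3).
  at::Tensor b = at::randint_like(ref, /*low=*/-2, /*high=*/3);
}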
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,42 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor randint_like(const at::Tensor & self, int64_t high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ TORCH_API at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ TORCH_API at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & randint_like_outf(const at::Tensor & self, int64_t high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ TORCH_API at::Tensor & randint_like_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & randint_like_symint_outf(const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ TORCH_API at::Tensor randint_like(const at::Tensor & self, const at::Tensor & high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor randint_like(const at::Tensor & self, const at::Tensor & high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ TORCH_API at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & randint_like_outf(const at::Tensor & self, const at::Tensor & high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ TORCH_API at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ TORCH_API at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ TORCH_API at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t low, int64_t high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & randint_like_outf(const at::Tensor & self, int64_t low, int64_t high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ TORCH_API at::Tensor & randint_like_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & randint_like_symint_outf(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_native.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor randint_like(const at::Tensor & self, int64_t high, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & randint_like_out_symint(const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ TORCH_API at::Tensor randint_like(const at::Tensor & self, const at::Tensor & high, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & randint_like_Tensor_out(const at::Tensor & self, const at::Tensor & high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ TORCH_API at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & randint_like_low_dtype_out_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ } // namespace native
+ } // namespace at
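
These at::native declarations name the kernel implementations that the generated registration code binds to the operator schemas; a hedged sketch (a direct call into at::native skips dispatch, autograd, and tracing entirely):

#include <ATen/ATen.h>
#include <ATen/ops/randint_like_native.h>

at::Tensor native_call(const at::Tensor& ref) {
  // Calls the kernel body directly; the trailing optionals are defaulted.
  return at::native::randint_like(ref, /*high=*/5);
}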
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_ops.h ADDED
@@ -0,0 +1,84 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <string_view>
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API randint_like {
+ using schema = at::Tensor (const at::Tensor &, c10::SymInt, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, ::std::optional<at::MemoryFormat>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randint_like";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor";
+ static at::Tensor call(const at::Tensor & self, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ };
+
+ struct TORCH_API randint_like_Tensor {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, ::std::optional<at::MemoryFormat>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randint_like";
+ static constexpr const char* overload_name = "Tensor";
+ static constexpr const char* schema_str = "randint_like.Tensor(Tensor self, Tensor high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor";
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ };
+
+ struct TORCH_API randint_like_low_dtype {
+ using schema = at::Tensor (const at::Tensor &, c10::SymInt, c10::SymInt, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, ::std::optional<at::MemoryFormat>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randint_like";
+ static constexpr const char* overload_name = "low_dtype";
+ static constexpr const char* schema_str = "randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor";
+ static at::Tensor call(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ };
+
+ struct TORCH_API randint_like_out {
+ using schema = at::Tensor & (const at::Tensor &, c10::SymInt, ::std::optional<at::MemoryFormat>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randint_like";
+ static constexpr const char* overload_name = "out";
+ static constexpr const char* schema_str = "randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ };
+
+ struct TORCH_API randint_like_Tensor_out {
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional<at::MemoryFormat>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randint_like";
+ static constexpr const char* overload_name = "Tensor_out";
+ static constexpr const char* schema_str = "randint_like.Tensor_out(Tensor self, Tensor high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ };
+
+ struct TORCH_API randint_like_low_dtype_out {
+ using schema = at::Tensor & (const at::Tensor &, c10::SymInt, c10::SymInt, ::std::optional<at::MemoryFormat>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randint_like";
+ static constexpr const char* overload_name = "low_dtype_out";
+ static constexpr const char* schema_str = "randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
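
The Function.h wrappers shown earlier bottom out in these generated operator structs; schema_str is the string registered with the dispatcher, and call()/redispatch() are the unboxed entry points. A hedged sketch of the equivalent raw call (call() has no defaulted arguments, so every optional is passed explicitly):

#include <ATen/ATen.h>
#include <ATen/ops/randint_like_ops.h>

at::Tensor raw_call(const at::Tensor& ref) {
  // int64_t converts implicitly to c10::SymInt for the `high` argument.
  return at::_ops::randint_like::call(
      ref, /*high=*/5,
      /*dtype=*/::std::nullopt, /*layout=*/::std::nullopt,
      /*device=*/::std::nullopt, /*pin_memory=*/::std::nullopt,
      /*memory_format=*/::std::nullopt);
}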
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_native.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ TORCH_API at::Tensor & randint_out(int64_t high, at::IntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ TORCH_API at::Tensor & randint_out(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ TORCH_API at::Tensor & randint_out(int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ TORCH_API at::Tensor & randint_out(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ } // namespace native
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randint_ops.h ADDED
@@ -0,0 +1,106 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <string_view>
6
+ #include <tuple>
7
+ #include <vector>
8
+
9
+ // Forward declarations of any types needed in the operator signatures.
10
+ // We can't directly include these classes because it will cause circular include dependencies.
11
+ // This file is included by TensorBody.h, which defines the Tensor class.
12
+ #include <ATen/core/ATen_fwd.h>
13
+
14
+ namespace at {
15
+ namespace _ops {
16
+
17
+
18
+ struct TORCH_API randint {
19
+ using schema = at::Tensor (c10::SymInt, c10::SymIntArrayRef, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
20
+ using ptr_schema = schema*;
21
+ // See Note [static constexpr char* members for windows NVCC]
22
+ static constexpr const char* name = "aten::randint";
23
+ static constexpr const char* overload_name = "";
24
+ static constexpr const char* schema_str = "randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
25
+ static at::Tensor call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
26
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
27
+ };
28
+
29
+ struct TORCH_API randint_generator {
30
+ using schema = at::Tensor (c10::SymInt, c10::SymIntArrayRef, ::std::optional<at::Generator>, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
31
+ using ptr_schema = schema*;
32
+ // See Note [static constexpr char* members for windows NVCC]
33
+ static constexpr const char* name = "aten::randint";
34
+ static constexpr const char* overload_name = "generator";
35
+ static constexpr const char* schema_str = "randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
36
+ static at::Tensor call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
37
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
38
+ };
39
+
40
+ struct TORCH_API randint_low {
41
+ using schema = at::Tensor (c10::SymInt, c10::SymInt, c10::SymIntArrayRef, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
42
+ using ptr_schema = schema*;
43
+ // See Note [static constexpr char* members for windows NVCC]
44
+ static constexpr const char* name = "aten::randint";
45
+ static constexpr const char* overload_name = "low";
46
+ static constexpr const char* schema_str = "randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
47
+ static at::Tensor call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
48
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
49
+ };
50
+
51
+ struct TORCH_API randint_low_generator {
52
+ using schema = at::Tensor (c10::SymInt, c10::SymInt, c10::SymIntArrayRef, ::std::optional<at::Generator>, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randint";
+ static constexpr const char* overload_name = "low_generator";
+ static constexpr const char* schema_str = "randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+ static at::Tensor call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ };
+
+ struct TORCH_API randint_out {
+ using schema = at::Tensor & (c10::SymInt, c10::SymIntArrayRef, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randint";
+ static constexpr const char* overload_name = "out";
+ static constexpr const char* schema_str = "randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out);
+ };
+
+ struct TORCH_API randint_generator_out {
+ using schema = at::Tensor & (c10::SymInt, c10::SymIntArrayRef, ::std::optional<at::Generator>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randint";
+ static constexpr const char* overload_name = "generator_out";
+ static constexpr const char* schema_str = "randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ };
+
+ struct TORCH_API randint_low_out {
+ using schema = at::Tensor & (c10::SymInt, c10::SymInt, c10::SymIntArrayRef, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randint";
+ static constexpr const char* overload_name = "low_out";
+ static constexpr const char* schema_str = "randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out);
+ };
+
+ struct TORCH_API randint_low_generator_out {
+ using schema = at::Tensor & (c10::SymInt, c10::SymInt, c10::SymIntArrayRef, ::std::optional<at::Generator>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randint";
+ static constexpr const char* overload_name = "low_generator_out";
+ static constexpr const char* schema_str = "randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
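
For orientation, a minimal usage sketch of the `randint` op structs declared above, assuming a translation unit that links against libtorch; the bounds and shape are illustrative only:

#include <ATen/ATen.h>
#include <ATen/ops/randint_ops.h>

void randint_sketch() {
  // Public functional form; under the hood it routes through these structs.
  at::Tensor a = at::randint(/*low=*/0, /*high=*/10, {2, 3});

  // Out variant invoked directly through the generated struct's static call(),
  // mirroring the randint.low_out schema string above.
  at::Tensor out = at::empty({2, 3}, at::kLong);
  at::_ops::randint_low_out::call(/*low=*/0, /*high=*/10,
                                  c10::fromIntArrayRefSlow({2, 3}), out);
}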
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn.h ADDED
@@ -0,0 +1,378 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <string_view>
+
+
+
+ #include <ATen/ops/randn_ops.h>
+
+ namespace at {
+
+
+ // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randn(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::randn::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randn(c10::SymIntArrayRef size, at::TensorOptions options={}) {
+ return at::_ops::randn::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn::call(size, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randn(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn::call(size, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) {
+ return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) {
+ return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) {
+ return at::_ops::randn_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randn(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) {
+ return at::_ops::randn_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_generator::call(size, generator, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randn(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_generator::call(size, generator, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randn(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randn(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::randn_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randn(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::randn_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_names::call(size, names, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randn(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_names::call(size, names, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::randn_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randn(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::randn_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor randn(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+ return at::_ops::randn_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) {
+ return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) {
+ return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
+ }
+ }
+
+ // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) {
+ return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) {
+ return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
+ }
+ }
+
+ // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
+ return at::_ops::randn_out::call(size, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size) {
+ return at::_ops::randn_out::call(size, out);
+ }
+ }
+
+ // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
+ return at::_ops::randn_out::call(size, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randn_outf(c10::SymIntArrayRef size, at::Tensor & out) {
+ return at::_ops::randn_out::call(size, out);
+ }
+ }
+
+ // aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ }
+
+ // aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
+ }
+ }
+
+ // aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::randn_generator_out::call(size, generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator) {
+ return at::_ops::randn_generator_out::call(size, generator, out);
+ }
+ }
+
+ // aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::randn_generator_out::call(size, generator, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randn_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::randn_generator_out::call(size, generator, out);
+ }
+ }
+
+ // aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::DimnameList> names) {
+ return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::DimnameList> names) {
+ return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
+ }
+ }
+
+ // aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
+ }
+ }
+
+ // aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names) {
+ return at::_ops::randn_names_out::call(size, names, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names) {
+ return at::_ops::randn_names_out::call(size, names, out);
+ }
+ }
+
+ // aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::randn_names_out::call(size, names, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randn_outf(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::randn_names_out::call(size, names, out);
+ }
+ }
+
+ // aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) {
+ return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) {
+ return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
+ }
+ }
+
+ // aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
+ }
+ }
+
+ // aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) {
+ return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) {
+ return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
+ }
+ }
+
+ // aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & randn_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
+ }
+ }
+
+ }
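
A minimal sketch of how the overload families above are typically called, assuming libtorch on a CPU build; the seed and shapes are illustrative, and `at::detail::createCPUGenerator` is ATen's stock helper for an explicitly seeded CPU generator:

#include <ATen/ATen.h>
#include <ATen/CPUGeneratorImpl.h>

void randn_sketch() {
  // Plain overload with TensorOptions.
  at::Tensor a = at::randn({2, 3}, at::TensorOptions().dtype(at::kFloat));

  // generator overload: reproducible sampling from a seeded generator.
  at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
  at::Tensor b = at::randn({2, 3}, gen, at::TensorOptions());

  // out overload: fills a preallocated tensor.
  at::Tensor out = at::empty({2, 3});
  at::randn_out(out, {2, 3});
}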
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,46 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={});
+ TORCH_API at::Tensor randn(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options={});
+ TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={});
+ TORCH_API at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={});
+ TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor randn(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={});
+ TORCH_API at::Tensor randn(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={});
+ TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::DimnameList> names);
+ TORCH_API at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ TORCH_API at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names);
+ TORCH_API at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ TORCH_API at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={});
+ TORCH_API at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options={});
+ TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ TORCH_API at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names);
+ TORCH_API at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ TORCH_API at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names);
+ TORCH_API at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,30 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size);
+ TORCH_API at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size);
+ TORCH_API at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator);
+ TORCH_API at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator);
+ TORCH_API at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <string_view>
+
+
+
+ #include <ATen/ops/randn_like_ops.h>
+
+ namespace at {
+
+
+ // aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randn_like::call(self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ // aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor randn_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::randn_like::call(self, dtype, layout, device, pin_memory, memory_format);
+ }
+
+ // aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_like_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+ return at::_ops::randn_like_out::call(self, memory_format, out);
+ }
+ // aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_like_outf(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randn_like_out::call(self, memory_format, out);
+ }
+
+ }
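
A short usage sketch of the `randn_like` entry points declared above, assuming libtorch; shapes and dtypes are illustrative:

#include <ATen/ATen.h>

void randn_like_sketch() {
  at::Tensor base = at::zeros({4, 4});
  // Same shape and options as `base`, filled with standard-normal samples.
  at::Tensor n = at::randn_like(base);
  // Keep the shape but override the dtype through TensorOptions.
  at::Tensor d = at::randn_like(base, at::TensorOptions().dtype(at::kDouble));
  // Out variant writing into an existing tensor.
  at::Tensor out = at::empty_like(base);
  at::randn_like_out(out, base);
}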
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor randn_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ TORCH_API at::Tensor & randn_like_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & randn_like_outf(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like_compositeimplicitautogradnestedtensor_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautogradnestedtensor {
+
+ TORCH_API at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor randn_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+
+ } // namespace compositeimplicitautogradnestedtensor
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like_native.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor randn_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+ TORCH_API at::Tensor & randn_like_out(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ } // namespace native
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like_ops.h ADDED
@@ -0,0 +1,40 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <string_view>
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API randn_like {
+ using schema = at::Tensor (const at::Tensor &, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, ::std::optional<at::MemoryFormat>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randn_like";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor";
+ static at::Tensor call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+ };
+
+ struct TORCH_API randn_like_out {
+ using schema = at::Tensor & (const at::Tensor &, ::std::optional<at::MemoryFormat>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randn_like";
+ static constexpr const char* overload_name = "out";
+ static constexpr const char* schema_str = "randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_native.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor & randn_out(at::IntArrayRef size, at::Tensor & out);
+ TORCH_API at::Tensor randn(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ TORCH_API at::Tensor & randn_out(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ TORCH_API at::Tensor randn(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ TORCH_API at::Tensor & randn_names_out_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ TORCH_API at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+ TORCH_API at::Tensor & randn_generator_with_names_out_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ } // namespace native
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_ops.h ADDED
@@ -0,0 +1,106 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <string_view>
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API randn {
+ using schema = at::Tensor (c10::SymIntArrayRef, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randn";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+ static at::Tensor call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ };
+
+ struct TORCH_API randn_generator {
+ using schema = at::Tensor (c10::SymIntArrayRef, ::std::optional<at::Generator>, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randn";
+ static constexpr const char* overload_name = "generator";
+ static constexpr const char* schema_str = "randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+ static at::Tensor call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ };
+
+ struct TORCH_API randn_names {
+ using schema = at::Tensor (c10::SymIntArrayRef, ::std::optional<at::DimnameList>, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randn";
+ static constexpr const char* overload_name = "names";
+ static constexpr const char* schema_str = "randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+ static at::Tensor call(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ };
+
+ struct TORCH_API randn_generator_with_names {
+ using schema = at::Tensor (c10::SymIntArrayRef, ::std::optional<at::Generator>, ::std::optional<at::DimnameList>, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randn";
+ static constexpr const char* overload_name = "generator_with_names";
+ static constexpr const char* schema_str = "randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+ static at::Tensor call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+ };
+
+ struct TORCH_API randn_out {
+ using schema = at::Tensor & (c10::SymIntArrayRef, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randn";
+ static constexpr const char* overload_name = "out";
+ static constexpr const char* schema_str = "randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymIntArrayRef size, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out);
+ };
+
+ struct TORCH_API randn_generator_out {
+ using schema = at::Tensor & (c10::SymIntArrayRef, ::std::optional<at::Generator>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randn";
+ static constexpr const char* overload_name = "generator_out";
+ static constexpr const char* schema_str = "randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out);
+ };
+
+ struct TORCH_API randn_names_out {
+ using schema = at::Tensor & (c10::SymIntArrayRef, ::std::optional<at::DimnameList>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randn";
+ static constexpr const char* overload_name = "names_out";
+ static constexpr const char* schema_str = "randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ };
+
+ struct TORCH_API randn_generator_with_names_out {
+ using schema = at::Tensor & (c10::SymIntArrayRef, ::std::optional<at::Generator>, ::std::optional<at::DimnameList>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::randn";
+ static constexpr const char* overload_name = "generator_with_names_out";
+ static constexpr const char* schema_str = "randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
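
The `name`, `overload_name`, and `schema` members above are the keys used for dispatcher lookup; a hedged sketch of that path, assuming the usual c10 dispatcher API (`findSchemaOrThrow`/`typed`):

#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/ops/randn_ops.h>

at::Tensor randn_via_dispatcher() {
  // Look up the operator by the exact name/overload recorded in the struct.
  auto handle = c10::Dispatcher::singleton().findSchemaOrThrow(
      at::_ops::randn::name, at::_ops::randn::overload_name);
  // `schema` is the C++ function type, so typed<> recovers a callable handle.
  return handle.typed<at::_ops::randn::schema>().call(
      c10::fromIntArrayRefSlow({2, 2}),
      ::std::nullopt, ::std::nullopt, ::std::nullopt, ::std::nullopt);
}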
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random.h ADDED
@@ -0,0 +1,68 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <string_view>
+
+
+
+ #include <ATen/ops/random_ops.h>
+
+ namespace at {
+
+
+ // aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt) {
+ return at::_ops::random_from_out::call(self, from, to, generator, out);
+ }
+ // aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & random_outf(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::random_from_out::call(self, from, to, generator, out);
+ }
+
+ // aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor
+ inline at::Tensor random(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt) {
+ return at::_ops::random_from::call(self, from, to, generator);
+ }
+
+ // aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt) {
+ return at::_ops::random_to_out::call(self, to, generator, out);
+ }
+ // aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & random_outf(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::random_to_out::call(self, to, generator, out);
+ }
+
+ // aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor
+ inline at::Tensor random(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt) {
+ return at::_ops::random_to::call(self, to, generator);
+ }
+
+ // aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt) {
+ return at::_ops::random_out::call(self, generator, out);
+ }
+ // aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & random_outf(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
+ return at::_ops::random_out::call(self, generator, out);
+ }
+
+ // aten::random(Tensor self, *, Generator? generator=None) -> Tensor
+ inline at::Tensor random(const at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt) {
+ return at::_ops::random::call(self, generator);
+ }
+
+ }
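A hedged usage sketch (not part of the diff) of the wrappers this header declares; the dtype and bounds are illustrative assumptions.

// Minimal sketch, assuming a standard libtorch build.
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::zeros({4}, at::kLong);
  at::Tensor out = at::empty_like(t);
  at::random_out(out, t, /*from=*/0, /*to=*/10);  // aten::random.from_out
  at::Tensor r = at::random(t, /*to=*/10);        // aten::random.to (functional variant)
  return 0;
}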
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,31 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor random(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_outf(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor random(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_outf(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor random(const at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_outf(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
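For context (not part of the diff): the namespaced declarations above can be called directly, which skips the dispatcher (and therefore autograd and any dispatch-key overrides); a hedged sketch with illustrative values follows. User code would normally go through at::random or Tensor::random_ instead.

// Minimal sketch, assuming a standard libtorch build.
#include <ATen/ATen.h>
#include <ATen/ops/random_compositeexplicitautograd_dispatch.h>

int main() {
  at::Tensor t = at::zeros({4}, at::kLong);
  // Direct call into the CompositeExplicitAutograd kernel, bypassing dispatch.
  at::Tensor r = at::compositeexplicitautograd::random(t, /*to=*/10);
  return 0;
}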
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_cpu_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cpu {
+
+ TORCH_API at::Tensor & random_(at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_(at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_(at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt);
+
+ } // namespace cpu
+ } // namespace at
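Likewise for the per-backend namespaces (not part of the diff): a hedged sketch of the in-place CPU entry point above, with illustrative bounds. The cuda and meta headers that follow expose the same three signatures for their backends.

// Minimal sketch, assuming a standard libtorch build and a CPU tensor.
#include <ATen/ATen.h>
#include <ATen/ops/random_cpu_dispatch.h>

int main() {
  at::Tensor t = at::zeros({4}, at::kLong);
  at::cpu::random_(t, /*from=*/0, /*to=*/10);  // calls the CPU kernel directly
  return 0;
}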
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API at::Tensor & random_(at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_(at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_(at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt);
+
+ } // namespace cuda
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_meta_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace meta {
+
+ TORCH_API at::Tensor & random_(at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_(at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_(at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt);
+
+ } // namespace meta
+ } // namespace at
Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/random_native.h ADDED
@@ -0,0 +1,32 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor random(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_from_out(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor & random_(at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_meta_(at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor random(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_to_out(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor & random_(at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_meta_(at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor random(const at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_out(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out);
+ TORCH_API at::Tensor & random_(at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt);
+ TORCH_API at::Tensor & random_meta_(at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt);
+ } // namespace native
+ } // namespace at
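Closing note (not part of the diff): in user code these native kernels are normally reached through the Tensor method rather than at::native; a hedged sketch with illustrative bounds.

// Minimal sketch, assuming a standard libtorch build.
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::empty({4}, at::kLong);
  t.random_(0, 10);  // aten::random_.from: uniform over [0, 10)
  t.random_(100);    // aten::random_.to: uniform over [0, 100)
  t.random_();       // aten::random_: bounds determined by the dtype
  return 0;
}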