diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..02fdb5eb638007e16a0274df61754590e4cdcfa0
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fractional_max_pool2d_output {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fractional_max_pool2d";
+  static constexpr const char* overload_name = "output";
+  static constexpr const char* schema_str = "fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))";
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices);
+};
+
+struct TORCH_API fractional_max_pool2d {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fractional_max_pool2d";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d.h
new file mode 100644
index 0000000000000000000000000000000000000000..c0096a38d965fab846866f62cbd7a9c79112e3d4
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
+    return at::_ops::fractional_max_pool3d_output::call(self, kernel_size, output_size, random_samples, output, indices);
+}
+// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
+    return at::_ops::fractional_max_pool3d_output::call(self, kernel_size, output_size, random_samples, output, indices);
+}
+
+// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
+    return at::_ops::fractional_max_pool3d::call(self, kernel_size, output_size, random_samples);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..7915c23f59ac988d22ddcd30367db764b7a1f821
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
+    return at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input);
+}
+// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & fractional_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
+    return at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input);
+}
+
+// aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
+inline at::Tensor fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
+    return at::_ops::fractional_max_pool3d_backward::call(grad_output, self, kernel_size, output_size, indices);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..e98fe485a2f6aeb02efaae585ac6f08a520834a4
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fractional_max_pool3d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fractional_max_pool3d_backward";
+  static constexpr const char* overload_name = "grad_input";
+  static constexpr const char* schema_str = "fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input);
+};
+
+struct TORCH_API fractional_max_pool3d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fractional_max_pool3d_backward";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor";
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..02a82c860f24c22aadf10f242d172fdd9bfc2b37
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7ea591768bd37f66e5918f6dd021d352a5cd2941
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..74bdcde879bdc245c29363d54b2b4e60573c6311
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_fractional_max_pool3d_out_cpu : public at::meta::structured_fractional_max_pool3d {
+void impl(const at::Tensor & self, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW, int64_t outputT, int64_t outputH, int64_t outputW, const at::Tensor & random_samples, int64_t numBatch, int64_t numPlanes, int64_t inputT, int64_t inputH, int64_t inputW, const at::Tensor & output, const at::Tensor & indices);
+};
+struct TORCH_API structured_fractional_max_pool3d_out_cuda : public at::meta::structured_fractional_max_pool3d {
+void impl(const at::Tensor & self, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW, int64_t outputT, int64_t outputH, int64_t outputW, const at::Tensor & random_samples, int64_t numBatch, int64_t numPlanes, int64_t inputT, int64_t inputH, int64_t inputW, const at::Tensor & output, const at::Tensor & indices);
+};
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..96ebaa0dee99e7858ef8d5370b7bdf179c79c968
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fractional_max_pool3d_output {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fractional_max_pool3d";
+  static constexpr const char* overload_name = "output";
+  static constexpr const char* schema_str = "fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))";
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices);
+};
+
+struct TORCH_API fractional_max_pool3d {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fractional_max_pool3d";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp.h
new file mode 100644
index 0000000000000000000000000000000000000000..2313ef2dd046bc0db0fd4e89c5d484b9572c0c95
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
+inline ::std::tuple<at::Tensor,at::Tensor> frexp(const at::Tensor & self) {
+    return at::_ops::frexp_Tensor::call(self);
+}
+
+// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
+inline ::std::tuple<at::Tensor &,at::Tensor &> frexp_out(at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self) {
+    return at::_ops::frexp_Tensor_out::call(self, mantissa, exponent);
+}
+// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
+inline ::std::tuple<at::Tensor &,at::Tensor &> frexp_outf(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
+    return at::_ops::frexp_Tensor_out::call(self, mantissa, exponent);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7f23ed030ecac100ae8bc0be877ab6b85a91a3e6
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> frexp(const at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e0e57c4b9968c165ef584bb37592a601954d882
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> frexp_out(at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> frexp_outf(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..110d845cfae8b794b2f9d7fffba44174e57ea6f8
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> frexp_out(at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> frexp_outf(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..27eac8da1346fb97d0dcc6e3ecd6e4526250dd3e
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> frexp(const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> frexp_out(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a071ff73a43f5721afc0642c9ead02ec98e196c
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API frexp_Tensor {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::frexp";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API frexp_Tensor_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::frexp";
+  static constexpr const char* overload_name = "Tensor_out";
+  static constexpr const char* schema_str = "frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)";
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm.h
new file mode 100644
index 0000000000000000000000000000000000000000..c2f7b88eae85854e60a66643a7e6ae6b7ae1203b
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+inline at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+    return at::_ops::frobenius_norm_dim::call(self, dim, keepdim);
+}
+
+// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & frobenius_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+    return at::_ops::frobenius_norm_out::call(self, dim, keepdim, out);
+}
+// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & frobenius_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
+    return at::_ops::frobenius_norm_out::call(self, dim, keepdim, out);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5ad00b4311a2fadc2c4f5fcfd5223ea427d41fa1
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor & frobenius_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor & frobenius_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4afd84d9678136255beda99578984648aecf7c2
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor & frobenius_norm_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ec57db6dc7ebe805da54f8eb993c4046d30a3672
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API frobenius_norm_dim {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::frobenius_norm";
+  static constexpr const char* overload_name = "dim";
+  static constexpr const char* schema_str = "frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim);
+};
+
+struct TORCH_API frobenius_norm_out {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::frobenius_norm";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_blob.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_blob.h
new file mode 100644
index 0000000000000000000000000000000000000000..a209380abb64e0125c879cde3e8bcfa66908b882
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_blob.h
@@ -0,0 +1,167 @@
+#pragma once
+#include
+
+namespace at {
+
+namespace detail {
+
+inline void noopDelete(void*) {}
+
+} // namespace detail
+
+/// Provides a fluent API to construct tensors from external data.
+///
+/// The fluent API can be used instead of `from_blob` functions in case the
+/// required set of parameters does not align with the existing overloads.
+///
+///     at::Tensor tensor = at::for_blob(data, sizes)
+///             .strides(strides)
+///             .context(context, [](void *ctx) { delete static_cast<Ctx*>(ctx);
+///             }) .options(...) .make_tensor();
+///
+class TORCH_API TensorMaker {
+  friend TensorMaker for_blob(void* data, IntArrayRef sizes) noexcept;
+
+ public:
+  using ContextDeleter = DeleterFnPtr;
+
+  TensorMaker& strides(OptionalIntArrayRef value) noexcept {
+    strides_ = value;
+
+    return *this;
+  }
+
+  TensorMaker& storage_offset(std::optional<int64_t> value) noexcept {
+    storage_offset_ = value;
+
+    return *this;
+  }
+
+  TensorMaker& deleter(std::function<void(void*)> value) noexcept {
+    deleter_ = std::move(value);
+
+    return *this;
+  }
+
+  TensorMaker& context(void* value, ContextDeleter deleter = nullptr) noexcept {
+    ctx_ = std::unique_ptr<void, ContextDeleter>{
+        value, deleter != nullptr ? deleter : detail::noopDelete};
+
+    return *this;
+  }
+
+  TensorMaker& target_device(std::optional<Device> value) noexcept {
+    device_ = value;
+
+    return *this;
+  }
+
+  TensorMaker& options(TensorOptions value) noexcept {
+    opts_ = value;
+
+    return *this;
+  }
+
+  TensorMaker& resizeable_storage() noexcept {
+    resizeable_ = true;
+
+    return *this;
+  }
+
+  TensorMaker& allocator(c10::Allocator* allocator) noexcept {
+    allocator_ = allocator;
+
+    return *this;
+  }
+
+  Tensor make_tensor();
+
+ private:
+  explicit TensorMaker(void* data, IntArrayRef sizes) noexcept
+      : data_{data}, sizes_{sizes} {}
+
+  std::size_t computeStorageSize() const noexcept;
+
+  DataPtr makeDataPtrFromDeleter() noexcept;
+
+  DataPtr makeDataPtrFromContext() noexcept;
+
+  IntArrayRef makeTempSizes() const noexcept;
+
+  void* data_;
+  IntArrayRef sizes_;
+  OptionalIntArrayRef strides_{};
+  std::optional<int64_t> storage_offset_{};
+  std::function<void(void*)> deleter_{};
+  std::unique_ptr<void, ContextDeleter> ctx_{nullptr, detail::noopDelete};
+  std::optional<Device> device_{};
+  TensorOptions opts_{};
+  bool resizeable_{};
+  c10::Allocator* allocator_{};
+};
+
+inline TensorMaker for_blob(void* data, IntArrayRef sizes) noexcept {
+  return TensorMaker{data, sizes};
+}
+
+inline Tensor from_blob(
+    void* data,
+    IntArrayRef sizes,
+    IntArrayRef strides,
+    const std::function<void(void*)>& deleter,
+    const TensorOptions& options = {},
+    const std::optional<Device> target_device = std::nullopt) {
+  return for_blob(data, sizes)
+      .strides(strides)
+      .deleter(deleter)
+      .options(options)
+      .target_device(target_device)
+      .make_tensor();
+}
+
+inline Tensor from_blob(
+    void* data,
+    IntArrayRef sizes,
+    IntArrayRef strides,
+    int64_t storage_offset,
+    const std::function<void(void*)>& deleter,
+    const TensorOptions& options = {},
+    const std::optional<Device> target_device = std::nullopt) {
+  return for_blob(data, sizes)
+      .strides(strides)
+      .storage_offset(storage_offset)
+      .deleter(deleter)
+      .options(options)
+      .target_device(target_device)
+      .make_tensor();
+}
+
+inline Tensor from_blob(
+    void* data,
+    IntArrayRef sizes,
+    std::function<void(void*)> deleter,
+    const TensorOptions& options = {},
+    const std::optional<Device> target_device = std::nullopt) {
+  return for_blob(data, sizes)
+      .deleter(std::move(deleter))
+      .options(options)
+      .target_device(target_device)
+      .make_tensor();
+}
+
+inline Tensor from_blob(
+    void* data,
+    IntArrayRef sizes,
+    IntArrayRef strides,
+    const TensorOptions& options = {}) {
+  return for_blob(data, sizes).strides(strides).options(options).make_tensor();
+}
+
+inline Tensor from_blob(
+    void* data,
+    IntArrayRef sizes,
+    const TensorOptions& options = {}) {
+  return for_blob(data, sizes).options(options).make_tensor();
+}
+
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce9f00831d42f4d86a156f448b2ee7cb3134dacb
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor from_file(c10::string_view filename, ::std::optional<bool> shared=::std::nullopt, ::std::optional<int64_t> size=0, at::TensorOptions options={}) {
+    return at::_ops::from_file::call(filename, shared, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor from_file(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::from_file::call(filename, shared, size, dtype, layout, device, pin_memory);
+}
+
+// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & from_file_out(at::Tensor & out, c10::string_view filename, ::std::optional<bool> shared=::std::nullopt, ::std::optional<int64_t> size=0) {
+    return at::_ops::from_file_out::call(filename, shared, size, out);
+}
+// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & from_file_outf(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, at::Tensor & out) {
+    return at::_ops::from_file_out::call(filename, shared, size, out);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3cf1301731c9fb6439c2e6c21a12337e85afc034
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & from_file_out(at::Tensor & out, c10::string_view filename, ::std::optional<bool> shared=::std::nullopt, ::std::optional<int64_t> size=0);
+TORCH_API at::Tensor & from_file_outf(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..965a4716bae88023abe23e8280dd84253618385a
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor from_file(c10::string_view filename, ::std::optional<bool> shared=::std::nullopt, ::std::optional<int64_t> size=0, at::TensorOptions options={});
+TORCH_API at::Tensor from_file(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b48f31bf1acae54f51ea92d63cd34467f6ea3ec
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & from_file_out(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, at::Tensor & out);
+TORCH_API at::Tensor from_file(c10::string_view filename, ::std::optional<bool> shared=::std::nullopt, ::std::optional<int64_t> size=0, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..218eefbb1faac6d14302d2d0bedfc6995fae10b9
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API from_file {
+  using schema = at::Tensor (c10::string_view, ::std::optional<bool>, ::std::optional<int64_t>, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::from_file";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+  static at::Tensor call(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+};
+
+struct TORCH_API from_file_out {
+  using schema = at::Tensor & (c10::string_view, ::std::optional<bool>, ::std::optional<int64_t>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::from_file";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full.h
new file mode 100644
index 0000000000000000000000000000000000000000..463555ed4e84cc556abaaa658254aa03a1bcdb11
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full.h
@@ -0,0 +1,132 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, at::TensorOptions options={}) {
+    return at::_ops::full_names::call(size, fill_value, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::full_names::call(size, fill_value, names, dtype, layout, device, pin_memory);
+}
+
+// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+    return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+    return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+}
+
+// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
+  }
+}
+
+// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+    return at::_ops::full::call(size, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+    return at::_ops::full::call(size, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+}
+
+// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::full::call(size, fill_value, dtype, layout, device, pin_memory);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::full::call(size, fill_value, dtype, layout, device, pin_memory);
+  }
+}
+
+// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) {
+    return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) {
+    return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+  }
+}
+
+// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+    return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+    return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+  }
+}
+
+// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
+    return at::_ops::full_out::call(size, fill_value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & full_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
+    return at::_ops::full_out::call(size, fill_value, out);
+  }
+}
+
+// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_symint_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+    return at::_ops::full_out::call(size, fill_value, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & full_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+    return at::_ops::full_out::call(size, fill_value, out);
+  }
+}
+
+// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names) {
+    return at::_ops::full_names_out::call(size, fill_value, names, out);
+}
+// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, at::Tensor & out) {
+    return at::_ops::full_names_out::call(size, fill_value, names, out);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d85aa566cec158d40123473ea769e3c5563307e
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,34 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::TensorOptions options={}); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names); +TORCH_API at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::Tensor & out); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}); +TORCH_API at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value); +TORCH_API at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); +TORCH_API at::Tensor & full_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value); +TORCH_API at::Tensor & full_symint_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like.h new file mode 100644 index 0000000000000000000000000000000000000000..bc25837adc3dfc99e52ae98b41f05fed0c5fe167 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options={}, ::std::optional memory_format=::std::nullopt) { + return at::_ops::full_like::call(self, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); +} +// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc25837adc3dfc99e52ae98b41f05fed0c5fe167
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/util/OptionalArrayRef.h>
+
+
+
+#include <ATen/ops/full_like_ops.h>
+
+namespace at {
+
+
+// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+    return at::_ops::full_like::call(self, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format);
+}
+
+// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_like_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) {
+    return at::_ops::full_like_out::call(self, fill_value, memory_format, out);
+}
+// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_like_outf(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::full_like_out::call(self, fill_value, memory_format, out);
+}
+
+}
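[Editor's note, not part of the vendored diff: `full_like` reads any unspecified options off `self`, like the other `*_like` factories. A small sketch of both overload styles; the values are arbitrary.]

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::rand({4, 4});

  // Same shape, dtype, and device as `self`; every element set to -1.
  at::Tensor m = at::full_like(self, -1.0);

  // TensorOptions overload: keep the shape but request float64, and pass
  // the memory format explicitly through the trailing optional argument.
  at::Tensor d = at::full_like(self, -1.0,
                               self.options().dtype(at::kDouble),
                               at::MemoryFormat::Contiguous);
  return 0;
}
```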
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d72131e5182de5ff8c56b88c577a6d670c597493
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+TORCH_API at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format);
+TORCH_API at::Tensor & full_like_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+TORCH_API at::Tensor & full_like_outf(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f96db01b810bcf8765f46ab59f577c8d370caecd
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_like_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+TORCH_API at::Tensor & full_like_out(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+} // namespace native
+} // namespace at
memory_format=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format); +}; + +struct TORCH_API full_like_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::full_like"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional memory_format, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, ::std::optional memory_format, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2e714be1dea653028f78c0bf47652c23b6d420e5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & full_names_out(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::Tensor & out); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & full_out(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d7523b18bfdec03525d9f40f9f0f777ee79eea68 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d7523b18bfdec03525d9f40f9f0f777ee79eea68
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_ops.h
@@ -0,0 +1,62 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <optional>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API full_names {
+  using schema = at::Tensor (at::IntArrayRef, const at::Scalar &, ::std::optional<at::DimnameList>, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::full";
+  static constexpr const char* overload_name = "names";
+  static constexpr const char* schema_str = "full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+  static at::Tensor call(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+};
+
+struct TORCH_API full {
+  using schema = at::Tensor (c10::SymIntArrayRef, const at::Scalar &, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::full";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor";
+  static at::Tensor call(c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+};
+
+struct TORCH_API full_out {
+  using schema = at::Tensor & (c10::SymIntArrayRef, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::full";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out);
+};
+
+struct TORCH_API full_names_out {
+  using schema = at::Tensor & (at::IntArrayRef, const at::Scalar &, ::std::optional<at::DimnameList>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::full";
+  static constexpr const char* overload_name = "names_out";
+  static constexpr const char* schema_str = "full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, at::Tensor & out);
+};
+
+}} // namespace at::_ops
out) -> Tensor(a!)"; + static at::Tensor & call(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional names, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant.h new file mode 100644 index 0000000000000000000000000000000000000000..a3d2d99e9854ca724368bcb0959d3f1b0695c863 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor +inline at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) { + return at::_ops::fused_moving_avg_obs_fake_quant::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..73f6f25f294c9f12c3835023bf1853237858663f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..73f6f25f294c9f12c3835023bf1853237858663f
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0cb658f448304ac499dc172673828d9fc674b110
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d36abeb289cb105ad6ff5d313bf295200dedacbf
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_ops.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <optional>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fused_moving_avg_obs_fake_quant {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, double, int64_t, int64_t, int64_t, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::fused_moving_avg_obs_fake_quant";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant);
+};
+
+}} // namespace at::_ops
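[Editor's note, not part of the vendored diff: each `at::_ops` struct above carries its operator's registration metadata as `static constexpr` strings, which can be read directly. A minimal sketch:]

```cpp
#include <ATen/ops/fused_moving_avg_obs_fake_quant_ops.h>
#include <iostream>

int main() {
  // The name/overload pair identifies the operator in the dispatcher;
  // schema_str is the full native_functions.yaml schema.
  std::cout << at::_ops::fused_moving_avg_obs_fake_quant::name << "\n"
            << at::_ops::fused_moving_avg_obs_fake_quant::schema_str << "\n";
  return 0;
}
```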
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather.h
new file mode 100644
index 0000000000000000000000000000000000000000..554c3601b9e3cbecf3f7bd1e5a131bd20b882c63
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/util/OptionalArrayRef.h>
+
+
+
+#include <ATen/ops/gather_ops.h>
+
+namespace at {
+
+
+// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) {
+    return at::_ops::gather_out::call(self, dim, index, sparse_grad, out);
+}
+// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
+    return at::_ops::gather_out::call(self, dim, index, sparse_grad, out);
+}
+
+// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
+inline at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) {
+    return at::_ops::gather::call(self, dim, index, sparse_grad);
+}
+
+// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) {
+    return at::_ops::gather_dimname_out::call(self, dim, index, sparse_grad, out);
+}
+// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & gather_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
+    return at::_ops::gather_dimname_out::call(self, dim, index, sparse_grad, out);
+}
+
+// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
+inline at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) {
+    return at::_ops::gather_dimname::call(self, dim, index, sparse_grad);
+}
+
+}
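[Editor's note, not part of the vendored diff: for `dim=1`, gather computes `out[i][j] = self[i][index[i][j]]`, and the output takes the shape of `index`. A small sketch with concrete values:]

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // self = [[1, 2, 3],
  //         [4, 5, 6]]
  at::Tensor self = at::arange(1, 7).reshape({2, 3});

  // index must be int64; one row selecting columns 0, 2, 1 of row 0.
  at::Tensor index = at::tensor({0, 2, 1}, at::kLong).reshape({1, 3});

  // out[0][j] = self[0][index[0][j]]  ->  [[1, 3, 2]]
  at::Tensor out = at::gather(self, /*dim=*/1, index);
  std::cout << out << "\n";
  return 0;
}
```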
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..7cee0a131494a664cbb0abf7fe2fe295d01f8f24
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward.h
@@ -0,0 +1,31 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/util/OptionalArrayRef.h>
+
+
+
+#include <ATen/ops/gather_backward_ops.h>
+
+namespace at {
+
+
+// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
+inline at::Tensor gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
+    return at::_ops::gather_backward::call(grad, self, dim, index, sparse_grad);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e011fe80c204dbe15109ae566539ecc9f34f68b1
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6845ba21d6849ca01daa0d00a871dac3582f551d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c60817958a7cc460f5b0fdeda1b520c889d8c43c
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_ops.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <optional>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API gather_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::gather_backward";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor";
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e257e0bcbdd5ebb32a2692394de5c899a9d1b8c4
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..54e866da05dfb7c5afa762c43ba89342cd12913a
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false);
+TORCH_API at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false);
+TORCH_API at::Tensor & gather_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b15c129ebe178ce5ed6e1a17b61e2b2aa75bada
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false);
+TORCH_API at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false);
+TORCH_API at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
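[Editor's note, not part of the vendored diff: torchgen emits every out-variant twice. `*_out` takes the output tensor first with trailing defaults available; `*_outf` follows the schema order, with `out` last and all arguments explicit. A sketch of the two spellings doing the same thing:]

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor self  = at::arange(1, 7).reshape({2, 3});           // int64
  at::Tensor index = at::tensor({0, 2, 1}, at::kLong).reshape({1, 3});
  at::Tensor out   = at::empty({1, 3}, at::kLong);

  // C++-friendly order: out first, sparse_grad defaulted to false.
  at::gather_out(out, self, /*dim=*/1, index);

  // Schema order: out last, so sparse_grad must be spelled out.
  at::gather_outf(self, /*dim=*/1, index, /*sparse_grad=*/false, out);
  return 0;
}
```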
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e65e01b1c738445c82198ed1797ceeb296f4c3f
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false);
+TORCH_API at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false);
+TORCH_API at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..ee8eef74bfcbca3f1f3b35a4a8e29b7295a003b3
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_gather : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad);
+};
+
+} // namespace native
+} // namespace at
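[Editor's note, not part of the vendored diff: because gather is a structured op with a meta function, it can run shape inference without allocating data by placing the inputs on the meta device. A hedged sketch:]

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // Meta tensors carry only dtype/shape metadata, no storage.
  auto opts = at::TensorOptions().device(at::kMeta);
  at::Tensor self  = at::empty({8, 4}, opts);
  at::Tensor index = at::empty({8, 1}, opts.dtype(at::kLong));

  // The structured_gather::meta() step validates shapes and produces
  // a meta output shaped like `index`, with no kernel execution.
  at::Tensor out = at::gather(self, /*dim=*/1, index);
  std::cout << out.sizes() << "\n";  // [8, 1]
  return 0;
}
```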
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..828620d2bca938902fd15f1b41be0d003022132b
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false);
+TORCH_API at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false);
+TORCH_API at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..51c768ff7b71a6a2b2fa81f8980367e9b8190aa2
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/gather_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_gather_out : public at::meta::structured_gather {
+void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, const at::Tensor & out);
+};
+TORCH_API at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false);
+TORCH_API at::Tensor & gather_out(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out);
+} // namespace native
+} // namespace at
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); +}; + +struct TORCH_API gather { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gather"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad); +}; + +struct TORCH_API gather_dimname_out { + using schema = at::Tensor & (const at::Tensor &, at::Dimname, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gather"; + static constexpr const char* overload_name = "dimname_out"; + static constexpr const char* schema_str = "gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); +}; + +struct TORCH_API gather_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gather"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h new file mode 100644 index 0000000000000000000000000000000000000000..0d255de0bc329e52e4e196afac4682e60462d7ba --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h
new file mode 100644
index 0000000000000000000000000000000000000000..0d255de0bc329e52e4e196afac4682e60462d7ba
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h
@@ -0,0 +1,45 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/util/OptionalArrayRef.h>
+
+
+
+#include <ATen/ops/gcd_ops.h>
+
+namespace at {
+
+
+// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::gcd_out::call(self, other, out);
+}
+// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::gcd_out::call(self, other, out);
+}
+
+// aten::gcd(Tensor self, Tensor other) -> Tensor
+inline at::Tensor gcd(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::gcd::call(self, other);
+}
+
+// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::gcd_::call(self, other);
+}
+
+}
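[Editor's note, not part of the vendored diff: gcd is an elementwise integer op with functional, out, and in-place (trailing underscore) variants. A small sketch:]

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::tensor({12, 18, 27}, at::kLong);
  at::Tensor b = at::tensor({ 8, 12,  9}, at::kLong);

  at::Tensor g = at::gcd(a, b);  // [4, 6, 9]
  at::gcd_(a, b);                // in-place: a becomes [4, 6, 9]
  return 0;
}
```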
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..50918137f2522ff9cf22356afdadafa2f36843ca
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor gcd(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a7689c34f2e206bbe5a4565d0fb10ffcf1c281b
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor gcd(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..25d9eeba2e135814d373f9046fb4fe472afa60bf
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor gcd(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..6a5117577c035b4e4f331c698d3b360d96781192
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_gcd : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b4da98875bd67285dcbe23e315738679c527937e
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor gcd(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..446db7321ad862e0ccf56669b39fa4e865cd379d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/gcd_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_gcd_out : public at::meta::structured_gcd {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API gcd { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gcd"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gcd(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API gcd_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gcd_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge.h new file mode 100644 index 0000000000000000000000000000000000000000..86bc45f6aeb8aac5cc9aaa3dc1fed4b8bfba6fc2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge_Scalar_out::call(self, other, out); +} +// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::ge_Scalar_out::call(self, other, out); +} + +// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor ge(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge_Scalar::call(self, other); +} + +// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ge_Tensor_out::call(self, other, out); +} +// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge.h
new file mode 100644
index 0000000000000000000000000000000000000000..86bc45f6aeb8aac5cc9aaa3dc1fed4b8bfba6fc2
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/util/OptionalArrayRef.h>
+
+
+
+#include <ATen/ops/ge_ops.h>
+
+namespace at {
+
+
+// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::ge_Scalar_out::call(self, other, out);
+}
+// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::ge_Scalar_out::call(self, other, out);
+}
+
+// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor ge(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::ge_Scalar::call(self, other);
+}
+
+// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::ge_Tensor_out::call(self, other, out);
+}
+// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::ge_Tensor_out::call(self, other, out);
+}
+
+// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor ge(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::ge_Tensor::call(self, other);
+}
+
+}
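[Editor's note, not part of the vendored diff: ge (>=) has Scalar and Tensor overloads and returns a boolean tensor. A small sketch:]

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::tensor({1, 5, 3}, at::kLong);

  // Scalar overload: elementwise t >= 3 -> [false, true, true]
  at::Tensor m1 = at::ge(t, 3);

  // Tensor overload: elementwise comparison -> [false, false, true]
  at::Tensor m2 = at::ge(t, at::tensor({2, 9, 3}, at::kLong));
  return 0;
}
```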
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..31d7d4adbbf922df5553a89357895981a9fa2822 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge_cuda_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e488b458e4648d336637dee2bd1939f4bd430a03 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge_meta_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8a943808f081eab3f6287e62a7de913874172403 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge_native.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_ge_Scalar_out : public at::meta::structured_ge_Scalar { +void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out); +}; +TORCH_API at::Tensor ge_scalar_nested(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ge_quantized_cpu(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +struct TORCH_API structured_ge_Tensor_out : public at::meta::structured_ge_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor ge_quantized_cpu(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu.h new file mode 100644 index 0000000000000000000000000000000000000000..53bd9186559f947b68ffd4ae3dc25772853caf07 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gelu_out(at::Tensor & out, const at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu_out::call(self, approximate, out); +} +// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out) { + return at::_ops::gelu_out::call(self, approximate, out); +} + +// aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!) +inline at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu_::call(self, approximate); +} + +// aten::gelu(Tensor self, *, str approximate='none') -> Tensor +inline at::Tensor gelu(const at::Tensor & self, c10::string_view approximate="none") { + return at::_ops::gelu::call(self, approximate); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fa915dc91bc9194e93fcf99f124570314d57201b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fe5129821001bc6dfbfc55640e2e699b04fe1f83 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
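+// A small usage sketch of the gelu entry points declared above (assumes a
+// linked libtorch; illustrative only):
+//
+//   at::Tensor x  = at::randn({8});
+//   at::Tensor y0 = at::gelu(x);           // default approximate="none" (erf-based)
+//   at::Tensor y1 = at::gelu(x, "tanh");   // tanh approximation
+//   at::gelu_(x);                          // in-place variant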
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cf3f261dcd2c9e65f308e42572ccfe7977b7dcdc --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..d71ef79331101e3ec035655d67cbea94ff12b666 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & glu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) { + return at::_ops::glu_backward_grad_input::call(grad_output, self, dim, grad_input); +} +// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & glu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
+    return at::_ops::glu_backward_grad_input::call(grad_output, self, dim, grad_input);
+}
+
+// aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
+inline at::Tensor glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
+    return at::_ops::glu_backward::call(grad_output, self, dim);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..169bf071bde7932bcfd7ca1f67a686869c4c472c
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru.h
new file mode 100644
index 0000000000000000000000000000000000000000..83d1abc510d3af300f813a3845a2dbabd630a03d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru.h
@@ -0,0 +1,36 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/util/OptionalArrayRef.h>
+
+
+
+#include <ATen/ops/gru_ops.h>
+
+namespace at {
+
+
+// aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+    return at::_ops::gru_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+}
+
+// aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
+    return at::_ops::gru_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell.h
new file mode 100644
index 0000000000000000000000000000000000000000..9908e1b7c2d54d86db0296a8ffd5ca157d34279e
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell.h
@@ -0,0 +1,31 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/util/OptionalArrayRef.h>
+
+
+
+#include <ATen/ops/gru_cell_ops.h>
+
+namespace at {
+
+
+// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
+inline at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih={}, const ::std::optional<at::Tensor> & b_hh={}) {
+    return at::_ops::gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4384a49b362c3a525b8421471dac1bf8626512a0
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih={}, const ::std::optional<at::Tensor> & b_hh={});
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..dc224424166cc7e9d9dc906dd952d654059386c7
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_ops.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/QScheme.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API gru_cell {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::gru_cell";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor";
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..633e18b28d1ca08eb19a6daab72c524a315b65ff
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..950bff28a199f34cdb0feea30a1cab08321e2110
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <c10/core/QScheme.h>
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
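+// A hedged usage sketch for the gru/gru_cell wrappers above (assumes a linked
+// libtorch; the flat `params` layout of {w_ih, w_hh, b_ih, b_hh} per layer is
+// an assumption consistent with the schema strings in this file):
+//
+//   int64_t input_size = 4, hidden_size = 8;
+//   at::Tensor x  = at::randn({5, 1, input_size});      // (seq, batch, feature)
+//   at::Tensor h0 = at::zeros({1, 1, hidden_size});     // (layers, batch, hidden)
+//   at::Tensor w_ih = at::randn({3 * hidden_size, input_size});   // 3 gates
+//   at::Tensor w_hh = at::randn({3 * hidden_size, hidden_size});
+//   at::Tensor b_ih = at::zeros({3 * hidden_size});
+//   at::Tensor b_hh = at::zeros({3 * hidden_size});
+//   auto [output, hn] = at::gru(x, h0, {w_ih, w_hh, b_ih, b_hh},
+//                               /*has_biases=*/true, /*num_layers=*/1,
+//                               /*dropout=*/0.0, /*train=*/false,
+//                               /*bidirectional=*/false, /*batch_first=*/false);
+//
+//   // Single time step with gru_cell (hx here is (batch, hidden)):
+//   at::Tensor h1 = at::gru_cell(x[0], h0[0], w_ih, w_hh, b_ih, b_hh);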
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API gru_input {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::gru";
+  static constexpr const char* overload_name = "input";
+  static constexpr const char* schema_str = "gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+};
+
+struct TORCH_API gru_data {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::gru";
+  static constexpr const char* overload_name = "data";
+  static constexpr const char* schema_str = "gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt.h
new file mode 100644
index 0000000000000000000000000000000000000000..f83361ca4d0152a30a5056bb7b8e7aee72ccf226
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/util/OptionalArrayRef.h>
+
+
+
+#include <ATen/ops/gt_ops.h>
+
+namespace at {
+
+
+// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::gt_Scalar_out::call(self, other, out);
+}
+// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::gt_Scalar_out::call(self, other, out); +} + +// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor gt(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::gt_Scalar::call(self, other); +} + +// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt_Tensor_out::call(self, other, out); +} +// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::gt_Tensor_out::call(self, other, out); +} + +// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor gt(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt_Tensor::call(self, other); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ef43564ff7cfd42ff7b112355af8539fa26c1946 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..602d9653c04510f84157d99cab1a0365f518cbcf --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_cpu_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor gt(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor gt(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..684fd4f248e55eba684ca669ff53eb8b3ef3aa1a
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta.h
@@ -0,0 +1,32 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_gt_Scalar : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Scalar & other);
+};
+struct TORCH_API structured_gt_Tensor : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..359ba036e171128210e518de814c615484bca24d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
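+// How the gt pieces fit together (a sketch of the structured-kernel pattern,
+// not generated code): structured_gt_Scalar::meta() in gt_meta.h validates
+// shapes and sets up the bool output through TensorIterator, and the
+// per-backend structured_gt_Scalar_out::impl() declared in gt_native.h
+// (below) fills it in. The public entry points route through that machinery:
+//
+//   at::Tensor a = at::arange(5);
+//   at::Tensor m = at::gt(a, 2);   // dispatches to the structured kernel
+//   a.gt_(2);                      // in-place variant, writes into a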
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor gt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_native.h new file mode 100644 index 0000000000000000000000000000000000000000..50acfcd0d100bb2a5815c43815a71a5a2e0751e2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_native.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_gt_Scalar_out : public at::meta::structured_gt_Scalar { +void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out); +}; +TORCH_API at::Tensor gt_scalar_nested(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor gt_quantized_cpu(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & gt_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +struct TORCH_API structured_gt_Tensor_out : public at::meta::structured_gt_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor gt_quantized_cpu(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gt_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8f6957cbb2a1132d8a27920f9e2422d6429b6b11 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
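+// The _ops structs that follow are the unboxed call path used by generated
+// code; user code normally goes through at::gt, but calling the op struct
+// directly is equivalent (sketch, assuming a linked libtorch):
+//
+//   at::Tensor a = at::arange(5);
+//   at::Tensor m = at::_ops::gt_Scalar::call(a, 2);   // same as at::gt(a, 2)
+//
+// name, overload_name, and schema_str identify the operator in the
+// dispatcher's registry.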
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API gt_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API gt_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "gt.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API gt_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API gt_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "gt.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API gt__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "gt_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API gt__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gt_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window.h new file mode 100644 index 0000000000000000000000000000000000000000..180243e120c870f55aadfbc24dfb8b6b4f9e44b5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window.h @@ -0,0 +1,98 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, at::TensorOptions options={}) { + return at::_ops::hamming_window::call(window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::hamming_window::call(window_length, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic::call(window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::hamming_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options={}) { + return at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, dtype, layout, device, pin_memory); +} + +// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length) { + return at::_ops::hamming_window_out::call(window_length, out); +} +// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_outf(int64_t window_length, at::Tensor & out) { + return at::_ops::hamming_window_out::call(window_length, out); +} + +// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::hamming_window_periodic_out::call(window_length, periodic, out); +} +// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::hamming_window_periodic_out::call(window_length, periodic, out); +} + +// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha) { + return at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out); +} +// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, at::Tensor & out) { + return at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out); +} + +// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha, double beta) { + return at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out); +} +// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) { + return at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f1e715d831208133c25f8860a1541f289287ad62 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h @@ -0,0 +1,38 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
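+// Usage sketch for the hamming_window factory overloads above (assumes a
+// linked libtorch): the TensorOptions form and the unpacked
+// dtype/layout/device/pin_memory form resolve to the same underlying op.
+//
+//   at::Tensor w1 = at::hamming_window(400);                        // defaults
+//   at::Tensor w2 = at::hamming_window(400, /*periodic=*/true,
+//                                      at::TensorOptions().dtype(at::kDouble));
+//   at::Tensor w3 = at::hamming_window(400, true, /*alpha=*/0.54,
+//                                      /*beta=*/0.46, at::kFloat);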
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor hamming_window(int64_t window_length, at::TensorOptions options={}); +TORCH_API at::Tensor hamming_window(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length); +TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options={}); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic); +TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options={}); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha); +TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options={}); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha, double beta); +TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window_native.h new file mode 100644 index 0000000000000000000000000000000000000000..271855322232c227716848d2fd1c6bdfd6e84e37 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hamming_window(int64_t window_length, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & hamming_window_out(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & hamming_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, ::std::optional dtype={}, 
::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & hamming_window_periodic_alpha_out(int64_t window_length, bool periodic, double alpha, at::Tensor & out); +TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & hamming_window_periodic_alpha_beta_out(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5f72cfa18ad13d1b2dd620fdf5b76fed2a2af042 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window_ops.h @@ -0,0 +1,106 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hamming_window { + using schema = at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API hamming_window_periodic { + using schema = at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic"; + static constexpr const char* schema_str = "hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API hamming_window_periodic_alpha { + using schema = at::Tensor (int64_t, bool, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic_alpha"; + static constexpr const char* schema_str = "hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, double alpha, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API hamming_window_periodic_alpha_beta { + using schema = at::Tensor (int64_t, bool, double, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic_alpha_beta"; + static constexpr const char* schema_str = "hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, double alpha, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API hamming_window_out { + using schema = at::Tensor & (int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hamming_window.out(int window_length, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out); +}; + +struct TORCH_API hamming_window_periodic_out { + using schema = at::Tensor & (int64_t, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic_out"; + static constexpr const char* schema_str = "hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out); +}; + +struct TORCH_API hamming_window_periodic_alpha_out { + using schema = at::Tensor & (int64_t, bool, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic_alpha_out"; + static constexpr const char* schema_str = "hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, double alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, at::Tensor & out); +}; + +struct TORCH_API hamming_window_periodic_alpha_beta_out { + using schema = at::Tensor & (int64_t, bool, double, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hamming_window"; + static constexpr const char* overload_name = "periodic_alpha_beta_out"; + static constexpr const char* schema_str = "hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window.h new file mode 100644 index 0000000000000000000000000000000000000000..76047ee0e4236c9a251cc7467dc7df02c438bf06 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor hann_window(int64_t window_length, at::TensorOptions options={}) { + return at::_ops::hann_window::call(window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hann_window(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::hann_window::call(window_length, dtype, layout, device, pin_memory); +} + +// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::hann_window_periodic::call(window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor hann_window(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::hann_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory); +} + +// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length) { + return at::_ops::hann_window_out::call(window_length, out); +} +// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hann_window_outf(int64_t window_length, at::Tensor & out) { + return at::_ops::hann_window_out::call(window_length, out); +} + +// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::hann_window_periodic_out::call(window_length, periodic, out); +} +// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hann_window_outf(int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::hann_window_periodic_out::call(window_length, periodic, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8fcf424a82263685734bd2e9783f474e5c7f2f8c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_compositeexplicitautograd_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor hann_window(int64_t window_length, at::TensorOptions options={}); +TORCH_API at::Tensor hann_window(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length); +TORCH_API at::Tensor & hann_window_outf(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options={}); +TORCH_API at::Tensor hann_window(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length, bool periodic); +TORCH_API at::Tensor & hann_window_outf(int64_t window_length, bool periodic, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fa91ceddc47270f02ff2ceff9da720097c7d1d3c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hann_window(int64_t window_length, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & hann_window_out(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor hann_window(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +TORCH_API at::Tensor & hann_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..15fc96652ae378af9fb123540d7f8da5d90d38dd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
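For reference, the functional wrappers generated above are the entry points user code actually calls. A minimal usage sketch in C++ (assuming a translation unit linked against libtorch; the function and tensor names are illustrative, and hamming_window follows the same pattern with additional alpha/beta overloads):

    #include <ATen/ATen.h>

    void window_factory_demo() {
      // TensorOptions convenience overload declared in hann_window.h
      at::Tensor w1 = at::hann_window(1024, at::TensorOptions().dtype(at::kFloat));
      // Periodic overload, typically used when building STFT frames
      at::Tensor w2 = at::hann_window(1024, /*periodic=*/true);
    }
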
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API hann_window { + using schema = at::Tensor (int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hann_window"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API hann_window_periodic { + using schema = at::Tensor (int64_t, bool, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hann_window"; + static constexpr const char* overload_name = "periodic"; + static constexpr const char* schema_str = "hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API hann_window_out { + using schema = at::Tensor & (int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hann_window"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out); +}; + +struct TORCH_API hann_window_periodic_out { + using schema = at::Tensor & (int64_t, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hann_window"; + static constexpr const char* overload_name = "periodic_out"; + static constexpr const char* schema_str = "hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink.h new file mode 100644 index 0000000000000000000000000000000000000000..74d2b9b15fccd9f3831744cc34bb3e421c89dfef --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::hardshrink_out::call(self, lambd, out); +} +// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) { + return at::_ops::hardshrink_out::call(self, lambd, out); +} + +// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor +inline at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::hardshrink::call(self, lambd); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..0a320d5f2b5352a4a47e4a083b5577d67f846c4e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::hardshrink_backward_grad_input::call(grad_out, self, lambd, grad_input); +} +// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) { + return at::_ops::hardshrink_backward_grad_input::call(grad_out, self, lambd, grad_input); +} + +// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor +inline at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::hardshrink_backward::call(grad_out, self, lambd); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8dfe7a032a380e0ef452f5a30f6a187b07135744 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..62684b1ac058212095c6435e0c28ed1d72b32125 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
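Note that the generated API exposes every out-variant twice: hardshrink_backward_out takes the output tensor first, while hardshrink_backward_outf keeps the schema's argument order with the output last; both forward to the same _ops::hardshrink_backward_grad_input::call. A minimal sketch (illustrative names, assuming libtorch):

    #include <ATen/ATen.h>

    void hardshrink_backward_demo(const at::Tensor& grad_out, const at::Tensor& self) {
      at::Tensor grad_input = at::empty_like(self);
      // out-parameter-first spelling
      at::hardshrink_backward_out(grad_input, grad_out, self, /*lambd=*/0.5);
      // schema-order spelling; identical effect
      at::hardshrink_backward_outf(grad_out, self, 0.5, grad_input);
    }
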
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ca4f68c6950cc5a8a594522ba278599177839b30 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..c18aba6b4a1c9d5cdbdda9f9e1ebef79df357fbe --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hardshrink_backward : public TensorIteratorBase { + + + void meta(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..819e120a26567a7bc6bb961cadca5b7e1efb3816 --- /dev/null +++ 
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ad67e62930d4138924c3f1a7c9e86caf084f4ecd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_hardshrink_backward_out : public at::meta::structured_hardshrink_backward { +void impl(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..dc89708fb51fe71384ac60b165a06e44091f9086 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
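The at::meta:: declarations above are backed by meta kernels that compute only output metadata (sizes, strides, dtype) and never touch data, which is what enables shape inference without computation. A sketch of that use via the meta device, under the assumption that the build registers meta kernels for this op (names illustrative):

    #include <ATen/ATen.h>

    void meta_shape_inference_demo() {
      // Meta tensors carry metadata but no storage
      at::Tensor self     = at::empty({4, 5}, at::TensorOptions().device(at::kMeta));
      at::Tensor grad_out = at::empty({4, 5}, at::TensorOptions().device(at::kMeta));
      // Runs only the meta() step of the structured kernel; the result
      // is a {4, 5} meta tensor whose values cannot be read
      at::Tensor grad_in = at::hardshrink_backward(grad_out, self, 0.5);
    }
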
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardshrink_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardshrink_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); +}; + +struct TORCH_API hardshrink_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardshrink_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0ac5cfccd87dc62a9dd9584839b937bd4aab978d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..64ce3072ecbb371c1c166e61fb11567872f6913e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..125d4fe0aae09a885acec9c3dcae7b1a246a7ad3 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..3eeb8f90f49dd5b92712cf6430aa03765328efa3 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hardshrink : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Scalar & lambd); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..45da88a4c4d95b747e5615ad26c0ae7c26534cb1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
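For orientation, hardshrink zeroes every element whose magnitude is at most lambd and passes the rest through unchanged, with lambd defaulting to 0.5 as the schema states. A minimal call sketch (assuming libtorch; names illustrative):

    #include <ATen/ATen.h>

    void hardshrink_demo(const at::Tensor& x) {
      at::Tensor y  = at::hardshrink(x);        // default lambd = 0.5
      at::Tensor y2 = at::hardshrink(x, 0.25);  // explicit Scalar lambd
    }
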
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5); +TORCH_API at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_native.h new file mode 100644 index 0000000000000000000000000000000000000000..dd66907d167f44ea1372dbbc8f5c395b38aa86db --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_hardshrink_out : public at::meta::structured_hardshrink { +void impl(const at::Tensor & self, const at::Scalar & lambd, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4cd8e678e06cca6e5e5721f25def440e5ddbd85b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardshrink_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardshrink"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); +}; + +struct TORCH_API hardshrink { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardshrink"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & lambd); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid.h new file mode 100644 index 0000000000000000000000000000000000000000..c4d1e7294a1b31ac68513c9ece6d99341c633b24 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::hardsigmoid_out::call(self, out); +} +// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardsigmoid_out::call(self, out); +} + +// aten::hardsigmoid(Tensor self) -> Tensor +inline at::Tensor hardsigmoid(const at::Tensor & self) { + return at::_ops::hardsigmoid::call(self); +} + +// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & hardsigmoid_(at::Tensor & self) { + return at::_ops::hardsigmoid_::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..512970cc3a3e70cc6c71babfd20fde4654bd6a65 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardsigmoid_backward_grad_input::call(grad_output, self, grad_input); +} +// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) { + return at::_ops::hardsigmoid_backward_grad_input::call(grad_output, self, grad_input); +} + +// aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor +inline at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardsigmoid_backward::call(grad_output, self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..38e950a9e74183da370fda5496cec06e932a26d0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..071c9e930bf93ac1cf49abdea85fa0e551745093 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
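Each public at:: function in these headers is a thin inline wrapper over a static call() on the corresponding struct in at::_ops, while redispatch() is the dispatcher's re-entry path that threads an explicit DispatchKeySet. The two caller-visible spellings are therefore equivalent; a sketch (assuming libtorch):

    #include <ATen/ATen.h>
    #include <ATen/ops/hardsigmoid_backward_ops.h>

    void call_path_demo(const at::Tensor& grad_output, const at::Tensor& self) {
      at::Tensor a = at::hardsigmoid_backward(grad_output, self);             // public wrapper
      at::Tensor b = at::_ops::hardsigmoid_backward::call(grad_output, self); // what it expands to
    }
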
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ddedaf4cb1de8e242b8313803308e3c73eb75a42 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..937b7e30ec3bf345c7637b59d8138de717b68a7b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hardsigmoid_backward : public TensorIteratorBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4de83514317b1f429fce7c809a5db850a1f58859 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is 
RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..387902a87bbb0fc83e2e0eee66d98c9c3d7b11da --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_hardsigmoid_backward_out : public at::meta::structured_hardsigmoid_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..fed65e34b93674d8d9db67edfe72eef2331261f9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardsigmoid_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardsigmoid_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) 
grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); +}; + +struct TORCH_API hardsigmoid_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardsigmoid_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4fbd4a144d95ad8c8fe8aa7b9fc496df7dde1b1e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hardsigmoid(const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5b243a7a623184634d1aad1da0af8b690c70521a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardsigmoid(const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..642140c2e920127aa6d870cf48a07eef153b1213 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardsigmoid(const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a8052706d360cc1bf7b98756af8ba769e54d1df2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hardsigmoid : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b91f096e0a72beb6fc978452c7af98ffd27fd1fd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hardsigmoid(const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_native.h new file mode 100644 index 0000000000000000000000000000000000000000..69a25c78319abd9c45e9cd8c7680dd34d191766f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_hardsigmoid_out : public at::meta::structured_hardsigmoid { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor hardsigmoid_quantized_cpu(const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_out_quantized_cpu(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..26ce105bb2fd43c450af7fd3f8cf5d84b461c70c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardsigmoid_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardsigmoid"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hardsigmoid.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API hardsigmoid { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardsigmoid"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardsigmoid(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API hardsigmoid_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardsigmoid_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardsigmoid_(Tensor(a!) self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish.h new file mode 100644 index 0000000000000000000000000000000000000000..837940963d78968f77af90ed37edc53fa6c10f2e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::hardswish_out::call(self, out); +} +// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardswish_out::call(self, out); +} + +// aten::hardswish(Tensor self) -> Tensor +inline at::Tensor hardswish(const at::Tensor & self) { + return at::_ops::hardswish::call(self); +} + +// aten::hardswish_(Tensor(a!) self) -> Tensor(a!) 
+inline at::Tensor & hardswish_(at::Tensor & self) { + return at::_ops::hardswish_::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..d9673b9d4920a1b2f1ac70d86b9cdf21e0f5ee15 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor +inline at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardswish_backward::call(grad_output, self); +} + +// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardswish_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardswish_backward_out::call(grad_output, self, out); +} +// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardswish_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardswish_backward_out::call(grad_output, self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5630a0560540fa2db05115e868a4eb560172d067 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
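[Editor's note] The generated headers above expose three C++ entry points per activation, following the usual torchgen naming: functional (`at::hardsigmoid`, `at::hardswish`), out-variant (`*_out` takes `out` first, `*_outf` takes it last, matching the schema order), and in-place (trailing underscore). A minimal usage sketch, assuming a translation unit linked against libtorch; the `main` scaffolding is illustrative and not part of these headers:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({4});
  at::Tensor y = at::hardsigmoid(x);   // functional: relu6(x + 3) / 6
  at::Tensor z = at::hardswish(x);     // functional: x * relu6(x + 3) / 6
  at::Tensor out = at::empty_like(x);
  at::hardswish_out(out, x);           // out= variant: writes into `out` and returns it
  at::hardswish_(x);                   // in-place variant: mutates x and returns it
  return 0;
}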
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & hardswish_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardswish_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5bfedcad5b6c885e68c1510406d04f33bccdb66a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fa6dab7b19b1b19e6c9f69bd2fe56334229a3aa7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
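[Editor's note] `hardswish_backward`, declared above, takes the upstream gradient first, per its schema. Autograd normally invokes it, but it can be driven by hand; a sketch under the same libtorch assumption:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({4});
  at::Tensor grad_out = at::ones_like(x);
  // d/dx hardswish(x) = 0 for x < -3, 1 for x > 3, (2x + 3) / 6 in between
  at::Tensor grad_in = at::hardswish_backward(grad_out, x);
  return 0;
}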
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..959fa22250bebcc516c7fface2d0e9218132fa12 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & hardswish_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6c531fa54fb6788ac8cb2dbfd5628005dc7978ad --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardswish_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardswish_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardswish_backward(Tensor grad_output, Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self); +}; + +struct TORCH_API hardswish_backward_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardswish_backward"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d4793bce95907be7bab651ba176c568b5ed02e1e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardswish(const at::Tensor & self); +TORCH_API at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardswish_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0f29b0293645726c227b2264e4eaf866a1e179a4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardswish(const at::Tensor & self); +TORCH_API at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardswish_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6742cd451354901b1bf59404bcac68e1de9962a3 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & hardswish_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_native.h new file mode 100644 index 0000000000000000000000000000000000000000..32efe4cd7cf26bb2768bf5fa978bd650c24302f9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hardswish(const at::Tensor & self); +TORCH_API at::Tensor & hardswish_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardswish_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0659ec70b6022a5427e1d826ee90214abd51d126 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
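[Editor's note] The `*_cpu_dispatch.h` / `*_cuda_dispatch.h` / `*_meta_dispatch.h` headers repeat each signature under `at::cpu`, `at::cuda`, and `at::meta`. Calling one of these directly bypasses the dispatcher and runs that backend's kernel, which is occasionally used to shave dispatch overhead when the backend is statically known; it is unchecked, so the tensor really must live on that backend. A sketch:

#include <ATen/ATen.h>
#include <ATen/ops/hardswish_cpu_dispatch.h>  // declares at::cpu::hardswish (see above)

int main() {
  at::Tensor x = at::randn({4});          // a CPU tensor
  at::Tensor y = at::cpu::hardswish(x);   // skips dispatch, calls the CPU kernel directly
  return 0;
}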
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardswish_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardswish"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API hardswish { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardswish"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardswish(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API hardswish_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardswish_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardswish_(Tensor(a!) self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh.h new file mode 100644 index 0000000000000000000000000000000000000000..f9fe344a9634d43780fcf531a2ae73535fb08001 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_out::call(self, min_val, max_val, out); +} +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) { + return at::_ops::hardtanh_out::call(self, min_val, max_val, out); +} + +// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor +inline at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh::call(self, min_val, max_val); +} + +// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) 
+inline at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_::call(self, min_val, max_val); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..b68f30d75d36d5d5b068fc417009dd2f64690c95 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + return at::_ops::hardtanh_backward_grad_input::call(grad_output, self, min_val, max_val, grad_input); +} +// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & hardtanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) { + return at::_ops::hardtanh_backward_grad_input::call(grad_output, self, min_val, max_val, grad_input); +} + +// aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor +inline at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + return at::_ops::hardtanh_backward::call(grad_output, self, min_val, max_val); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..46aae89384310e93a51ad04c0626b6c782ad745c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
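[Editor's note] `hardtanh` clamps to `[min_val, max_val]`, with both Scalars defaulting to -1 and 1 in every variant above; `hardtanh_backward` passes gradient only where the input fell strictly inside the clamp range. A sketch, assuming the `at::tensor` list factory and a libtorch build:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::tensor({-3.0, 0.5, 2.0});
  at::Tensor y = at::hardtanh(x);             // defaults [-1, 1] -> {-1.0, 0.5, 1.0}
  at::Tensor z = at::hardtanh(x, -2.0, 2.0);  // explicit Scalar min_val / max_val
  at::Tensor g = at::hardtanh_backward(at::ones_like(x), x, -1, 1);  // -> {0, 1, 0}
  return 0;
}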
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c17b850d85b8b28e488a54b8875aeaeac8e15023 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..462896c61f42b57b72b25f8a73fe5f0eaa4790b6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_ops.h 
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8df67f4f2950f0a88164f80797efd68cf781e846 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardtanh_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardtanh_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); +}; + +struct TORCH_API hardtanh_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardtanh_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..90e05ba52039c823736375263b44bd6106bddf98 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..38b0c66ba237f53279c6efc9faeb71ae75a4afd0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..726e9613e9aedf34c3a673a556b41405e6a2829c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_native.h new file mode 100644 index 0000000000000000000000000000000000000000..408220c832c62e86e86e36aebfb89958fa8c2025 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_out(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor hardtanh_quantized_cpu(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_out_quantized_cpu(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +TORCH_API at::Tensor & hardtanh_quantized_cpu_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b69e991db58f2af89c9124a617e35a65e80fa6c0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardtanh_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardtanh"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +}; + +struct TORCH_API hardtanh { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardtanh"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +}; + +struct TORCH_API hardtanh_ { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardtanh_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..4d7a90a612568222ae3f36c40709d3c210152301 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hash_tensor(Tensor self, int[1] dim=[], *, bool keepdim=False, int mode=0) -> Tensor +inline at::Tensor hash_tensor(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false, int64_t mode=0) { + return at::_ops::hash_tensor::call(self, dim, keepdim, mode); +} + +// aten::hash_tensor.out(Tensor self, int[1] dim=[], *, bool keepdim=False, int mode=0, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hash_tensor_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false, int64_t mode=0) { + return at::_ops::hash_tensor_out::call(self, dim, keepdim, mode, out); +} +// aten::hash_tensor.out(Tensor self, int[1] dim=[], *, bool keepdim=False, int mode=0, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & hash_tensor_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, int64_t mode, at::Tensor & out) { + return at::_ops::hash_tensor_out::call(self, dim, keepdim, mode, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8ea3aa8c9f33684aa9befea5af3ea26cb593786c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hash_tensor(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false, int64_t mode=0); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..56fa4c08cc26038f8ca1a6b5ca0991199ffcea5a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
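[Editor's note] `hash_tensor` is a reduction: it hashes `self` over `dim`, where the default empty `dim` list reduces over all dimensions; `keepdim` and `mode` are keyword-only in the schema but plain trailing parameters in the C++ API. The meaning of `mode` values other than the default 0 is not documented in these headers, so this sketch leaves it at the default:

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::randn({3, 4});
  at::Tensor h_all  = at::hash_tensor(t);  // dim = [] -> one hash over the whole tensor
  at::Tensor h_rows = at::hash_tensor(t, {1}, /*keepdim=*/false, /*mode=*/0);  // one per row
  return 0;
}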
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hash_tensor(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false, int64_t mode=0); +TORCH_API at::Tensor & hash_tensor_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false, int64_t mode=0); +TORCH_API at::Tensor & hash_tensor_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, int64_t mode, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..267fede9e20d4f921844dd53d238f52d8f94d0b6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hash_tensor(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false, int64_t mode=0); +TORCH_API at::Tensor & hash_tensor_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false, int64_t mode=0); +TORCH_API at::Tensor & hash_tensor_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, int64_t mode, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..d57d256f0adcd956834142df79b8f88bb0c178da --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hash_tensor : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, int64_t mode); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fbf4208d3e5a74f994734b5d332be9dcd6d62cdd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is 
RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hash_tensor(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false, int64_t mode=0); +TORCH_API at::Tensor & hash_tensor_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false, int64_t mode=0); +TORCH_API at::Tensor & hash_tensor_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, int64_t mode, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e6a4a683117908d55d8b2dc8005ff220a9f517b6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_hash_tensor_out : public at::meta::structured_hash_tensor { +void impl(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, int64_t mode, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..62b26927637a4d284cc598f213299a4e6456ec0d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hash_tensor_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
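[Editor's note] The `structured_hash_tensor` / `structured_hash_tensor_out` pair above is the structured-kernel split: `meta()` checks arguments and fixes the output's shape and dtype, `impl()` does the actual work, and the Meta backend registration reuses `meta()` alone. That is what lets shape propagation run on the `Meta` device without allocating or computing anything, e.g.:

#include <ATen/ATen.h>

int main() {
  // Meta tensors carry shape/dtype only; kernels under at::meta never touch data.
  at::Tensor x = at::empty({3, 4}, at::TensorOptions().device(at::kMeta));
  at::Tensor h = at::hash_tensor(x, {1}, /*keepdim=*/false, /*mode=*/0);
  // h.sizes() == {3}: the structured meta() computed the result shape; no hashing ran.
  return 0;
}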
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API hash_tensor { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hash_tensor"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hash_tensor(Tensor self, int[1] dim=[], *, bool keepdim=False, int mode=0) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, int64_t mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, int64_t mode); +}; + +struct TORCH_API hash_tensor_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hash_tensor"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hash_tensor.out(Tensor self, int[1] dim=[], *, bool keepdim=False, int mode=0, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, int64_t mode, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, int64_t mode, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside.h new file mode 100644 index 0000000000000000000000000000000000000000..491ed497f25db1e253bccf2d4dc7ca589467c081 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values) { + return at::_ops::heaviside_out::call(self, values, out); +} +// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out) { + return at::_ops::heaviside_out::call(self, values, out); +} + +// aten::heaviside(Tensor self, Tensor values) -> Tensor +inline at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values) { + return at::_ops::heaviside::call(self, values); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..39c520468b9d3671e6a5e847d0488e4d2a61635f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..89834dee1d41873ba12d2cb18b059c006bf75b4e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
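[Editor's note] `heaviside` is elementwise over two tensors of the same dtype: the result is 0 where `self < 0`, 1 where `self > 0`, and the broadcast `values` where `self == 0`. A sketch:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::tensor({-1.5, 0.0, 2.0});
  at::Tensor v = at::tensor({0.5});       // substituted where x == 0 (broadcasts)
  at::Tensor y = at::heaviside(x, v);     // -> {0.0, 0.5, 1.0}
  x.heaviside_(v);                        // in-place Tensor-method form of heaviside_
  return 0;
}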
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out); +TORCH_API at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c28380986264e55a7c971fa47cc11537ce793250 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out); +TORCH_API at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..f5e3821eb30bdf4d02b3b687681ca70faaa365a6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_heaviside : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & values); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e980bb6084f9335285247d1f3e3ea7c3d6ae624 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom 
classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out); +TORCH_API at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_native.h new file mode 100644 index 0000000000000000000000000000000000000000..62850eb8cef517b65db39ba06384ec3e44d434c1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_heaviside_out : public at::meta::structured_heaviside { +void impl(const at::Tensor & self, const at::Tensor & values, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4ba3c33ebef6a4d3bb613092931fa85153ffd5d0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API heaviside_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::heaviside"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "heaviside.out(Tensor self, Tensor values, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & values, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values, at::Tensor & out); +}; + +struct TORCH_API heaviside { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::heaviside"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "heaviside(Tensor self, Tensor values) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & values); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values); +}; + +struct TORCH_API heaviside_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::heaviside_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & values); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & values); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..80cc0352916097a4276f7bb7ba6ea21cf1c01cd6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor +inline at::Tensor hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin=1.0, int64_t reduction=at::Reduction::Mean) { + return at::_ops::hinge_embedding_loss::call(self, target, margin, reduction); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e5b29bc9b9f7e3b8ef594bc0295912bb24f88935 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
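// [usage sketch] A minimal, illustrative example of the aten::heaviside
// overloads declared above (functional, out=, and in-place). The input values
// and the helper function name are assumptions, not part of the vendored headers.
#include <ATen/ATen.h>

static void heaviside_example() {
  at::Tensor x = at::tensor({-1.5f, 0.0f, 2.0f});
  at::Tensor v = at::tensor({0.5f});      // value used where x == 0
  at::Tensor y = at::heaviside(x, v);     // functional form -> {0.0, 0.5, 1.0}
  at::Tensor out = at::empty_like(x);
  at::heaviside_out(out, x, v);           // explicit out= variant
  x.heaviside_(v);                        // in-place method variant
}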
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin=1.0, int64_t reduction=at::Reduction::Mean); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..80b70c1a1a4a27b896fe8e4446bd4895c70bfdb8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin=1.0, int64_t reduction=at::Reduction::Mean); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d2b5792c73ce9642d1bc9807f5e39f87cf57b318 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
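// [usage sketch] aten::hinge_embedding_loss as declared above; margin and
// reduction shown are the schema defaults, the inputs and helper name are
// illustrative assumptions.
#include <ATen/ATen.h>

static void hinge_embedding_loss_example() {
  at::Tensor input  = at::randn({4});
  at::Tensor target = at::tensor({1.0f, -1.0f, 1.0f, -1.0f});  // labels in {+1, -1}
  at::Tensor loss = at::hinge_embedding_loss(input, target,
                                             /*margin=*/1.0,
                                             /*reduction=*/at::Reduction::Mean);
}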
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API hinge_embedding_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, double, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hinge_embedding_loss"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc.h new file mode 100644 index 0000000000000000000000000000000000000000..5c825acf22664cf262f73e066122ff505b09e8a5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & histc_out(at::Tensor & out, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) { + return at::_ops::histc_out::call(self, bins, min, max, out); +} +// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & histc_outf(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) { + return at::_ops::histc_out::call(self, bins, min, max, out); +} + +// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor +inline at::Tensor histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) { + return at::_ops::histc::call(self, bins, min, max); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d1328d6bab03f8e24f8256931f04997facbbde84 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
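// [usage sketch] The aten::histc overloads declared above. With the default
// min == max == 0 the range is taken from the data; the explicit-range call
// and bin count below are illustrative choices.
#include <ATen/ATen.h>

static void histc_example() {
  at::Tensor t = at::randn({1000});
  at::Tensor h = at::histc(t);            // bins=100, range inferred from data
  at::Tensor out = at::empty({50});
  at::histc_outf(t, /*bins=*/50, /*min=*/-3.0, /*max=*/3.0, out);
}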
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & histc_out(at::Tensor & out, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & histc_outf(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dc6f0db6aeae39e5229d10539e520599cd9f127d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & histc_out(at::Tensor & out, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & histc_outf(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ca4c5490e5f10a009cdaabb46c5a246b9d4385e7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor histogram_histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & histogram_histc_out(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); +TORCH_API at::Tensor _histc_cuda(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & _histc_out_cuda(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_ops.h new file mode 100644 index 
0000000000000000000000000000000000000000..f2812aa090f85202ef0ac076b258b9b0e9a4c52f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histc_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API histc_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histc"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); +}; + +struct TORCH_API histc { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histc"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram.h new file mode 100644 index 0000000000000000000000000000000000000000..65128a77c3972346c90aa587848d223934ab6814 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) +inline ::std::tuple histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight={}, bool density=false) { + return at::_ops::histogram_bins_tensor_out::call(self, bins, weight, density, hist, bin_edges); +} +// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) 
bin_edges) +inline ::std::tuple histogram_outf(const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) { + return at::_ops::histogram_bins_tensor_out::call(self, bins, weight, density, hist, bin_edges); +} + +// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) +inline ::std::tuple histogram(const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight={}, bool density=false) { + return at::_ops::histogram_bins_tensor::call(self, bins, weight, density); +} + +// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) +inline ::std::tuple histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, int64_t bins=100, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false) { + return at::_ops::histogram_bin_ct_out::call(self, bins, range, weight, density, hist, bin_edges); +} +// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) +inline ::std::tuple histogram_outf(const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) { + return at::_ops::histogram_bin_ct_out::call(self, bins, range, weight, density, hist, bin_edges); +} + +// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) +inline ::std::tuple histogram(const at::Tensor & self, int64_t bins=100, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false) { + return at::_ops::histogram_bin_ct::call(self, bins, range, weight, density); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a0f90ac5c6d585da6d5ac008dbd5575ea11ad8c8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
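// [usage sketch] The two aten::histogram overloads declared above: bin_ct
// (an integer bin count) and bins_tensor (explicit bin edges). All values
// are illustrative assumptions.
#include <ATen/ATen.h>

static void histogram_example() {
  at::Tensor data = at::randn({1000});
  auto [hist, edges] = at::histogram(data, /*bins=*/20);        // bin_ct overload
  at::Tensor bin_edges = at::linspace(-3.0, 3.0, /*steps=*/21); // 20 bins
  auto [hist2, edges2] = at::histogram(data, bin_edges,
                                       /*weight=*/::std::nullopt,
                                       /*density=*/true);        // bins_tensor overload
}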
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple histogram(const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight={}, bool density=false); +TORCH_API ::std::tuple histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight={}, bool density=false); +TORCH_API ::std::tuple histogram_outf(const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); +TORCH_API ::std::tuple histogram(const at::Tensor & self, int64_t bins=100, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false); +TORCH_API ::std::tuple histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, int64_t bins=100, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false); +TORCH_API ::std::tuple histogram_outf(const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_native.h new file mode 100644 index 0000000000000000000000000000000000000000..dcf73d3a9c8a5716f506f2c1277f9cddadb5844a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple histogram(const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight={}, bool density=false); +TORCH_API ::std::tuple histogram_out(const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); +TORCH_API ::std::tuple histogram(const at::Tensor & self, int64_t bins=100, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false); +TORCH_API ::std::tuple histogram_out(const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5c216dc8fa608a411c20426a5e048c2e71888166 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API histogram_bins_tensor_out { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogram"; + static constexpr const char* overload_name = "bins_tensor_out"; + static constexpr const char* schema_str = "histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)"; + static ::std::tuple call(const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); +}; + +struct TORCH_API histogram_bins_tensor { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogram"; + static constexpr const char* overload_name = "bins_tensor"; + static constexpr const char* schema_str = "histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)"; + static ::std::tuple call(const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight, bool density); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const ::std::optional & weight, bool density); +}; + +struct TORCH_API histogram_bin_ct_out { + using schema = ::std::tuple (const at::Tensor &, int64_t, ::std::optional>, const ::std::optional &, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogram"; + static constexpr const char* overload_name = "bin_ct_out"; + static constexpr const char* schema_str = "histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)"; + static ::std::tuple call(const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); +}; + +struct TORCH_API histogram_bin_ct { + using schema = ::std::tuple (const at::Tensor &, int64_t, ::std::optional>, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogram"; + static constexpr const char* overload_name = "bin_ct"; + static constexpr const char* schema_str = "histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)"; + static ::std::tuple call(const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd.h new file mode 100644 index 0000000000000000000000000000000000000000..df9903b68451a3a7774f63ea472a98fe8507df38 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd.h @@ -0,0 +1,41 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) +inline ::std::tuple> histogramdd(const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false) { + return at::_ops::histogramdd::call(self, bins, range, weight, density); +} + +// aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) +inline ::std::tuple> histogramdd(const at::Tensor & self, int64_t bins, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false) { + return at::_ops::histogramdd_int_bins::call(self, bins, range, weight, density); +} + +// aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) +inline ::std::tuple> histogramdd(const at::Tensor & self, at::TensorList bins, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false) { + return at::_ops::histogramdd_TensorList_bins::call(self, bins, range, weight, density); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0557c7ab3977d764c45f4bcee65472ac9969101b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
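// [usage sketch] aten::histogramdd (int[] bins overload) as declared above:
// N points in D dimensions yield a D-dimensional histogram plus one 1-D edge
// tensor per dimension. Shapes and bin counts are illustrative assumptions.
#include <ATen/ATen.h>

static void histogramdd_example() {
  at::Tensor pts = at::randn({500, 2});                      // 500 points, D = 2
  auto [hist, edges] = at::histogramdd(pts, at::IntArrayRef{10, 10});
  // hist is 10x10; edges is a ::std::vector<at::Tensor> with two edge tensors.
}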
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple> histogramdd(const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false); +TORCH_API ::std::tuple> histogramdd(const at::Tensor & self, int64_t bins, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false); +TORCH_API ::std::tuple> histogramdd(const at::Tensor & self, at::TensorList bins, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_native.h new file mode 100644 index 0000000000000000000000000000000000000000..95123b2764a58d9ed576adecc9553965a3ab408d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple> histogramdd(const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false); +TORCH_API ::std::tuple> histogramdd(const at::Tensor & self, int64_t bins, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false); +TORCH_API ::std::tuple> histogramdd(const at::Tensor & self, at::TensorList bins, ::std::optional> range=::std::nullopt, const ::std::optional & weight={}, bool density=false); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..03ff17d117479e8fbdf11c3585b54efae80a8944 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogramdd_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API histogramdd { + using schema = ::std::tuple> (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogramdd"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)"; + static ::std::tuple> call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range, const ::std::optional & weight, bool density); + static ::std::tuple> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range, const ::std::optional & weight, bool density); +}; + +struct TORCH_API histogramdd_int_bins { + using schema = ::std::tuple> (const at::Tensor &, int64_t, ::std::optional>, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogramdd"; + static constexpr const char* overload_name = "int_bins"; + static constexpr const char* schema_str = "histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)"; + static ::std::tuple> call(const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density); + static ::std::tuple> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional> range, const ::std::optional & weight, bool density); +}; + +struct TORCH_API histogramdd_TensorList_bins { + using schema = ::std::tuple> (const at::Tensor &, at::TensorList, ::std::optional>, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::histogramdd"; + static constexpr const char* overload_name = "TensorList_bins"; + static constexpr const char* schema_str = "histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)"; + static ::std::tuple> call(const at::Tensor & self, at::TensorList bins, ::std::optional> range, const ::std::optional & weight, bool density); + static ::std::tuple> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, ::std::optional> range, const ::std::optional & weight, bool density); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit.h new file mode 100644 index 0000000000000000000000000000000000000000..cb9bff5f2f8ef4828c26145fe063e04452c60941 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit.h @@ -0,0 +1,36 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] +inline ::std::vector hsplit(const at::Tensor & self, int64_t sections) { + return at::_ops::hsplit_int::call(self, sections); +} + +// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] +inline ::std::vector hsplit(const at::Tensor & self, at::IntArrayRef indices) { + return at::_ops::hsplit_array::call(self, indices); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7e5bb8049724285cc8faf8b1143b840d41ee9d09 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
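// [usage sketch] The two aten::hsplit overloads declared above: an even
// section count and explicit column indices. Both return views over the
// input; the example matrix is an illustrative assumption.
#include <ATen/ATen.h>

static void hsplit_example() {
  at::Tensor m = at::arange(16).reshape({4, 4});
  ::std::vector<at::Tensor> halves = at::hsplit(m, /*sections=*/2);        // two 4x2 views
  ::std::vector<at::Tensor> parts  = at::hsplit(m, at::IntArrayRef{1, 3}); // widths 1, 2, 1
}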
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::vector hsplit(const at::Tensor & self, int64_t sections); +TORCH_API ::std::vector hsplit(const at::Tensor & self, at::IntArrayRef indices); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2c5086c562e8db26edc7bf88b72c1f4f05695933 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::vector hsplit(const at::Tensor & self, int64_t sections); +TORCH_API ::std::vector hsplit(const at::Tensor & self, at::IntArrayRef indices); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..261e95234ff7d098f4581f3ffb70635bd3196f56 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API hsplit_int { + using schema = ::std::vector (const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hsplit"; + static constexpr const char* overload_name = "int"; + static constexpr const char* schema_str = "hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]"; + static ::std::vector call(const at::Tensor & self, int64_t sections); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections); +}; + +struct TORCH_API hsplit_array { + using schema = ::std::vector (const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hsplit"; + static constexpr const char* overload_name = "array"; + static constexpr const char* schema_str = "hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]"; + static ::std::vector call(const at::Tensor & self, at::IntArrayRef indices); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm.h new file mode 100644 index 0000000000000000000000000000000000000000..2db9e16dd2baf7a752dcdca4c1043605ce2d9cfd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hspmm_out(at::Tensor & out, const at::Tensor & mat1, const at::Tensor & mat2) { + return at::_ops::hspmm_out::call(mat1, mat2, out); +} +// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & hspmm_outf(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) { + return at::_ops::hspmm_out::call(mat1, mat2, out); +} + +// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor +inline at::Tensor hspmm(const at::Tensor & mat1, const at::Tensor & mat2) { + return at::_ops::hspmm::call(mat1, mat2); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..227afb22e0813bfcd58b596f45d27e3473d6a730 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hspmm_sparse_cpu(const at::Tensor & mat1, const at::Tensor & mat2); +TORCH_API at::Tensor & hspmm_out_sparse_cpu(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out); +TORCH_API at::Tensor hspmm_sparse_cuda(const at::Tensor & mat1, const at::Tensor & mat2); +TORCH_API at::Tensor & hspmm_out_sparse_cuda(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..718af4e2e841c75af46b947c3c6fcaf0142276a8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hspmm_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hspmm_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hspmm"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out); +}; + +struct TORCH_API hspmm { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hspmm"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hspmm(Tensor mat1, Tensor mat2) -> Tensor"; + static at::Tensor call(const at::Tensor & mat1, const at::Tensor & mat2); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack.h new file mode 100644 index 0000000000000000000000000000000000000000..7d90ebe74dd2c3fb9e1c9d4db42abd93c4e9806a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hstack(Tensor[] tensors) -> Tensor +inline at::Tensor hstack(at::TensorList tensors) { + return at::_ops::hstack::call(tensors); +} + +// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hstack_out(at::Tensor & out, at::TensorList tensors) { + return at::_ops::hstack_out::call(tensors, out); +} +// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hstack_outf(at::TensorList tensors, at::Tensor & out) { + return at::_ops::hstack_out::call(tensors, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6e7d4bccbbbb214b5a16b7d9bcddd7ac8aa7ff65 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
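// [usage sketch] aten::hspmm as declared above: multiply a sparse COO matrix
// by a dense matrix. Building mat1 via to_sparse() is an illustrative choice,
// not something prescribed by these headers.
#include <ATen/ATen.h>

static void hspmm_example() {
  at::Tensor mat1 = at::randn({3, 3}).to_sparse();  // sparse COO lhs
  at::Tensor mat2 = at::randn({3, 4});              // dense rhs
  at::Tensor r = at::hspmm(mat1, mat2);             // hybrid sparse result
}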
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor hstack(at::TensorList tensors); +TORCH_API at::Tensor & hstack_out(at::Tensor & out, at::TensorList tensors); +TORCH_API at::Tensor & hstack_outf(at::TensorList tensors, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d1922270180d196c4225b7698ade658e8f04f19f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hstack(at::TensorList tensors); +TORCH_API at::Tensor & hstack_out(at::TensorList tensors, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..fa6a1acbc7870d34e5b2fb8b842341197234de05 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hstack { + using schema = at::Tensor (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hstack"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hstack(Tensor[] tensors) -> Tensor"; + static at::Tensor call(at::TensorList tensors); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors); +}; + +struct TORCH_API hstack_out { + using schema = at::Tensor & (at::TensorList, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hstack"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hstack.out(Tensor[] tensors, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(at::TensorList tensors, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..24f867cb53bec93d3a6dee4477fc0b510d83ea6a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) { + return at::_ops::huber_loss_out::call(self, target, reduction, delta, out); +} +// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) { + return at::_ops::huber_loss_out::call(self, target, reduction, delta, out); +} + +// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor +inline at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) { + return at::_ops::huber_loss::call(self, target, reduction, delta); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..8466194623786c896a5a7c151458be6c769b772a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & huber_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) { + return at::_ops::huber_loss_backward_out::call(grad_output, self, target, reduction, delta, grad_input); +} +// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) 
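// [usage sketch] aten::hstack as declared above: concatenation along dim 1
// for 2-D inputs (dim 0 for 1-D). Shapes and the helper name are illustrative.
#include <ATen/ATen.h>

static void hstack_example() {
  at::Tensor a = at::ones({2, 2});
  at::Tensor b = at::zeros({2, 3});
  at::Tensor c = at::hstack({a, b});        // -> shape [2, 5]
  at::Tensor out = at::empty({2, 5});
  at::hstack_out(out, {a, b});              // out= variant declared above
}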
+inline at::Tensor & huber_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) { + return at::_ops::huber_loss_backward_out::call(grad_output, self, target, reduction, delta, grad_input); +} + +// aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor +inline at::Tensor huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) { + return at::_ops::huber_loss_backward::call(grad_output, self, target, reduction, delta); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0815cd96efab939c37dd08122e79caa8f282ec1f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c040c728a022c82d6495c6102fe9424e176b9fc3 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
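// [usage sketch] aten::huber_loss and its backward as declared above. With
// reduction=Mean the incoming grad_output is a 0-dim tensor here; inputs are
// illustrative assumptions.
#include <ATen/ATen.h>

static void huber_loss_example() {
  at::Tensor input  = at::randn({8});
  at::Tensor target = at::randn({8});
  at::Tensor loss = at::huber_loss(input, target);   // reduction=Mean, delta=1.0
  at::Tensor grad_in = at::huber_loss_backward(at::ones({}), input, target,
                                               at::Reduction::Mean, /*delta=*/1.0);
}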
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & huber_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +TORCH_API at::Tensor & huber_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..534fd4019d4547dca8c8d7d498233daeac7124a2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & huber_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +TORCH_API at::Tensor & huber_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2aa9fbe41b0776f7fd1d81a71f86495e7580e857 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +TORCH_API at::Tensor & huber_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cd74e3234def3ce56098fb9b4cd20dc384f55245 --- /dev/null +++ 
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API huber_loss_backward_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::huber_loss_backward"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); +}; + +struct TORCH_API huber_loss_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::huber_loss_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c19936a33610a166a0e56aae14ccd357b9afd95c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
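// The headers above declare two spellings of each out-variant: `_out` takes
// the output tensor first, while `_outf` takes it last, matching the schema
// order. A hedged sketch of the explicit backward, assuming libtorch:
#include <ATen/ATen.h>

int main() {
  at::Tensor self     = at::randn({4});
  at::Tensor target   = at::zeros({4});
  at::Tensor grad_out = at::ones({});  // dL/dloss for a Mean-reduced scalar loss

  // Returns dL/dself directly, without autograd graph bookkeeping.
  at::Tensor grad_in = at::huber_loss_backward(
      grad_out, self, target, at::Reduction::Mean, /*delta=*/1.0);

  // Same computation through the schema-ordered `_outf` spelling.
  at::Tensor buf = at::empty_like(self);
  at::huber_loss_backward_outf(grad_out, self, target,
                               at::Reduction::Mean, 1.0, buf);
  return 0;
}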
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0); +TORCH_API at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0); +TORCH_API at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e20abaad71c819df823ed82aa19b0d55bf11487e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0); +TORCH_API at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0); +TORCH_API at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a5d021d1717508b6f610b14d75ec8f84f98ef97c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0); +TORCH_API at::Tensor & huber_loss_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6a54a990b50ec0e05417ef11e66fadcb5c96c5d6 --- /dev/null +++ 
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API huber_loss_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::huber_loss"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); +}; + +struct TORCH_API huber_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::huber_loss"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot.h new file mode 100644 index 0000000000000000000000000000000000000000..bb9748adb9e1ea9ad4e084d3b52129a6cff4ec01 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::hypot_out::call(self, other, out); +} +// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::hypot_out::call(self, other, out); +} + +// aten::hypot(Tensor self, Tensor other) -> Tensor +inline at::Tensor hypot(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::hypot::call(self, other); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a073bed34110233a9370f476e37ddadb51feb9f9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hypot(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..07a16a1ec721476c56cd6ce5b97f3a86f4d707a8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
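// Small sketch of the at::hypot overloads declared above (illustrative;
// assumes libtorch). hypot computes sqrt(a^2 + b^2) elementwise without
// intermediate overflow or underflow.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::full({2}, 3.0);
  at::Tensor b = at::full({2}, 4.0);

  at::Tensor c = at::hypot(a, b);  // every element is 5
  a.hypot_(b);                     // in-place variant (aten::hypot_) mutates `a`

  std::cout << c << "\n";
  return 0;
}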
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hypot(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a090ebcd807b51431038fbc01ec2ec46986aaafa --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hypot(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..875f0110a40b4e2feb591a4bb4810d869d1d5983 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hypot : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..36fe940f953a313521b36625dc28c3f5eaef62ca --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// 
Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hypot(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f27289ec82f4195ebf69d70e25ec00049604d21d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_hypot_out : public at::meta::structured_hypot { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5e0d70dce8d448520d9583039d4346029bbeae36 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hypot_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hypot"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "hypot.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API hypot { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hypot"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hypot(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API hypot_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hypot_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0.h new file mode 100644 index 0000000000000000000000000000000000000000..c72b1ab6022e6282583cb470233cf95c9815e4e4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::i0(Tensor self) -> Tensor +inline at::Tensor i0(const at::Tensor & self) { + return at::_ops::i0::call(self); +} + +// aten::i0_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & i0_(at::Tensor & self) { + return at::_ops::i0_::call(self); +} + +// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::i0_out::call(self, out); +} +// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::i0_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b5cd615ecd35730f9e17cf9663799d7dab3ec000 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor i0(const at::Tensor & self); +TORCH_API at::Tensor & i0_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f21daeb1bfe50734f5646f253d62d86a33406c04 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor i0(const at::Tensor & self); +TORCH_API at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & i0_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..071a436948fc7d9220df45c3d8cb98f91349061f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor i0(const at::Tensor & self); +TORCH_API at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & i0_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..086098e842a608e04dae827f12c9ae2686e8200e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_i0 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a4f3e136ebe261b908fc48dd390af63e641f922a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
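// Sketch for the i0 family above (modified Bessel function of the first
// kind, order zero); illustrative only, assuming libtorch.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor x = at::zeros({3});
  at::Tensor y = at::i0(x);        // I0(0) == 1, so y is all ones
  x.i0_();                         // in-place method variant (aten::i0_)

  at::Tensor out = at::empty({3});
  at::i0_out(out, y);              // out variant writes into `out`
  std::cout << y << "\n";
  return 0;
}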
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor i0(const at::Tensor & self); +TORCH_API at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & i0_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_native.h new file mode 100644 index 0000000000000000000000000000000000000000..486593d4f54f2ace1f6c74020764bafbe1487dcf --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_i0_out : public at::meta::structured_i0 { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..96b6b26e570e1da968c6cb514cd4ac36fb965d21 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API i0 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::i0"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "i0(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API i0_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::i0_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "i0_(Tensor(a!) self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API i0_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::i0"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "i0.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma.h new file mode 100644 index 0000000000000000000000000000000000000000..cf8d9116d65f8638e71f9c191d8541b252d7290a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igamma_out::call(self, other, out); +} +// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::igamma_out::call(self, other, out); +} + +// aten::igamma(Tensor self, Tensor other) -> Tensor +inline at::Tensor igamma(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igamma::call(self, other); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..993156e810d1e7fb1576fda00cf1855602473f28 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
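// Each `at::_ops::*` struct above carries the operator's canonical schema
// string plus `call`/`redispatch` entry points; public wrappers such as
// at::i0(x) forward to `call`. A sketch of inspecting them directly
// (normally unnecessary in user code; assumes libtorch):
#include <ATen/ATen.h>
#include <ATen/ops/i0_ops.h>
#include <iostream>

int main() {
  std::cout << at::_ops::i0::name << "  schema: "
            << at::_ops::i0::schema_str << "\n";

  at::Tensor x = at::ones({2});
  at::Tensor y = at::_ops::i0::call(x);  // same dispatcher path as at::i0(x)
  std::cout << y << "\n";
  return 0;
}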
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..346cefa58c38137cd370a4e64eed6531e14961b1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1abc1ed59555bb1223fa9b4fa14beb50b2da6cc0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
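// Usage sketch for at::igamma declared above: the regularized lower
// incomplete gamma function P(a, x). For a == 1, P(1, x) = 1 - exp(-x).
// Illustrative only, assuming libtorch.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::ones({3});
  at::Tensor x = at::full({3}, 2.0);
  at::Tensor p = at::igamma(a, x);  // ~0.8647 everywhere (1 - e^-2)
  std::cout << p << "\n";
  return 0;
}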
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..646b4d820de95e25e0d5e6e41fa28c0bd9dde3b5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_igamma : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..37990bb89853a1b5bdc96ded1decff382fee9187 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
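// The `meta` namespace variants compute only output metadata (shape, dtype),
// which is what tensors on the Meta device exercise. A hedged sketch --
// this assumes a libtorch build where at::kMeta and the meta kernels for
// this structured op are available:
#include <ATen/ATen.h>
#include <iostream>

int main() {
  auto opts = at::TensorOptions().device(at::kMeta);
  at::Tensor a = at::empty({8, 1}, opts);
  at::Tensor b = at::empty({1, 4}, opts);

  at::Tensor c = at::igamma(a, b);  // no data touched; only shapes propagate
  std::cout << c.sizes() << "\n";   // [8, 4] via broadcasting
  return 0;
}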
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7cf09f02f8d12c30d4dc421d94b896527446e66e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_igamma_out : public at::meta::structured_igamma { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..aa02f5acd94f3d02bb1260e88e36db21ef34717a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API igamma_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igamma"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "igamma.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API igamma { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igamma"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "igamma(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API igamma_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igamma_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac.h new file mode 100644 index 0000000000000000000000000000000000000000..a2da7e71a570c65bae0bed3ac229f6871d557544 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igammac_out::call(self, other, out); +} +// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::igammac_out::call(self, other, out); +} + +// aten::igammac(Tensor self, Tensor other) -> Tensor +inline at::Tensor igammac(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igammac::call(self, other); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f379585d85c7eaec55415268b0dd5ba7cbdfbf83 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..72bd9f918f33c7acccb97dc086cc45a4fb881d7c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
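// at::igammac, declared above, is the regularized *upper* incomplete gamma
// Q(a, x), the complement of at::igamma: P(a, x) + Q(a, x) == 1. A small
// numeric check (illustrative; assumes libtorch):
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::full({4}, 2.5);
  at::Tensor x = at::rand({4}) * 5;

  at::Tensor sum = at::igamma(a, x) + at::igammac(a, x);
  std::cout << at::allclose(sum, at::ones_like(sum)) << "\n";  // prints 1
  return 0;
}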
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..802b83632fb36510e1859ba333f67df84360298c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..9b612a90a73fa25b142ca978b63e5cba16fe7f96 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_igammac : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a10c3d3aead9f28e4d8b15b299c58b9ff91c4118 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API 
+#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5a56ddab31f924399800969c86ad6d35f659bf97 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_igammac_out : public at::meta::structured_igammac { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..49b216c18b3b706c28224c4f202c503c152f924b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API igammac_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igammac"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "igammac.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API igammac { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igammac"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "igammac(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API igammac_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::igammac_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h new file mode 100644 index 0000000000000000000000000000000000000000..9b63246c8acc553b97d8cc53d07d1f24477ed451 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out); +} +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out); +} + +// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor +inline at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f894d1b0cc209549f95060ce8d2a94b08ceabb54 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..77db0b79ae45859727929e1c65b36d2a56155830 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
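A minimal usage sketch for the igammac entry points declared in the headers above (functional, out, and outf forms), assuming a linked libtorch/ATen build; the tensor values are arbitrary:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  // Arbitrary example values for the shape parameter a and the argument x.
  at::Tensor a = at::full({2}, 2.0);
  at::Tensor x = at::full({2}, 1.0);
  // Functional form allocates its result: regularized upper incomplete gamma Q(a, x).
  at::Tensor q = at::igammac(a, x);
  // The out/outf pair routes to the same aten::igammac.out schema;
  // out-variants take the destination first, outf-variants take it last.
  at::Tensor q2 = at::empty_like(q);
  at::igammac_out(q2, a, x);
  at::igammac_outf(a, x, q2);
  // Sanity check: Q(a, x) + P(a, x) == 1, where P is at::igamma.
  std::cout << q + at::igamma(a, x) << "\n";
  return 0;
}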
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_native.h new file mode 100644 index 0000000000000000000000000000000000000000..166d718e2b0bc5f8c2b08c5761f198a3a329f403 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor im2col_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); +TORCH_API at::Tensor im2col_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..77012f1b80511ace20419ee62610aa9f593ae9e7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API im2col_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::im2col"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); +}; + +struct TORCH_API im2col { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::im2col"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag.h new file mode 100644 index 0000000000000000000000000000000000000000..1e98a9e27065364e1e6fb705eabcd37fb980082b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::imag(Tensor(a) self) -> Tensor(a) +inline at::Tensor imag(const at::Tensor & self) { + return at::_ops::imag::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3ed78d4b3036eda7306082c445c405e7b846461a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
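The im2col headers above expose the patch-extraction primitive behind unfold-style convolution lowering; the schema comments give the argument order. A minimal sketch, assuming a linked libtorch/ATen build:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  // One 3-channel 8x8 "image": layout (N, C, H, W).
  at::Tensor img = at::arange(3 * 8 * 8, at::kFloat).reshape({1, 3, 8, 8});
  // Extract sliding 3x3 patches; the result is (N, C*kH*kW, L) = (1, 27, 36),
  // since stride 1 and no padding give 6*6 = 36 patch positions.
  at::Tensor cols = at::im2col(img, /*kernel_size=*/{3, 3}, /*dilation=*/{1, 1},
                               /*padding=*/{0, 0}, /*stride=*/{1, 1});
  std::cout << cols.sizes() << "\n";
  // The out-variant writes into preallocated storage instead of allocating.
  at::Tensor out = at::empty({1, 27, 36}, img.options());
  at::im2col_out(out, img, {3, 3}, {1, 1}, {0, 0}, {1, 1});
  return 0;
}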
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor imag(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1238223d229e23b65c18398de72207031b765aae --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor imag(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a78c05a4c11528b4ed45c5d9b80968fec8ea3d58 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API imag { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::imag"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "imag(Tensor(a) self) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index.h new file mode 100644 index 0000000000000000000000000000000000000000..bbc9b1aa1d3b74fc0a2f6c7af95a7929f3804c3b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor +inline at::Tensor index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) { + return at::_ops::index_Tensor::call(self, indices); +} + +// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) { + return at::_ops::index_Tensor_out::call(self, indices, out); +} +// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out) { + return at::_ops::index_Tensor_out::call(self, indices, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add.h new file mode 100644 index 0000000000000000000000000000000000000000..dc869c1f1b845ffe1e520695b73588b5a89fd156 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_out::call(self, dim, index, source, alpha, out); +} +// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::index_add_out::call(self, dim, index, source, alpha, out); +} + +// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor +inline at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add::call(self, dim, index, source, alpha); +} + +// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor +inline at::Tensor index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_dimname::call(self, dim, index, source, alpha); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e0f0f8337346d2ccb6c4efd91defec36e6bf1051 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..df159d65c6a9a6ee02aa3c91082f018aa9fb47d2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4080458b80553ba8663d14ea25b396020aac9d23 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
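The `Tensor?[] indices` argument in the index.Tensor schema earlier is the C++ side of advanced indexing: one optional index tensor per dimension, where an empty optional means "take the whole dimension". A minimal sketch, assuming a linked libtorch/ATen build:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor t = at::arange(12, at::kFloat).reshape({3, 4});
  // Equivalent of the Python expression t[[0, 2], :].
  c10::List<::std::optional<at::Tensor>> indices;
  indices.push_back(at::arange(0, 3, 2));            // int64 row indices {0, 2}
  indices.push_back(::std::optional<at::Tensor>());  // keep all columns
  at::Tensor picked = at::index(t, indices);
  std::cout << picked.sizes() << "\n";  // [2, 4]
  return 0;
}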
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fdbf44b7c04c0bac689bb16e864b8ffeefeed957 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..6a6fba887e03a9d81326b7d3ac96fff8ba54db6b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_index_add : public at::impl::MetaBase { + + template <bool DIM = false> + struct TORCH_API precompute_out { + + precompute_out<true> set_dim(int64_t value) { + static_assert(DIM == false, "dim already set"); + precompute_out<true> ret; +ret.dim = value; +return ret; + } + + int64_t dim; + }; + using meta_return_ty = precompute_out <true>; + meta_return_ty
meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c48adc8cf61afe59deed014552ecacfc2cbc1ee7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d6f58f70ebba01bb01617ae3b57b5cbfbaa6b340 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_index_add_cpu_out : public at::meta::structured_index_add { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, const at::Tensor & out); +}; +struct TORCH_API structured_index_add_cuda_out : public at::meta::structured_index_add { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, const at::Tensor & out); +}; +TORCH_API at::Tensor index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_ops.h new 
file mode 100644 index 0000000000000000000000000000000000000000..23d38b8ecb933257e5fab8d1b56779f31868ae4b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_add_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_add"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); +}; + +struct TORCH_API index_add_ { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_add_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_add_(Tensor(a!) 
self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +struct TORCH_API index_add { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_add"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +struct TORCH_API index_add_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_add"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..95836db5b8c3c12568980cc11270717162720355 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
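The four schemas above (out, in-place, functional, dimname) all describe the same scatter-accumulate: rows of `source` selected by `index` are scaled by `alpha` and added into `self` along `dim`. A minimal sketch, assuming a linked libtorch/ATen build:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor self = at::zeros({3, 2});
  at::Tensor index = at::arange(0, 3, 2);   // int64 indices {0, 2}
  at::Tensor source = at::ones({2, 2});
  // Functional form: result[index[i], :] += alpha * source[i, :].
  at::Tensor result = at::index_add(self, /*dim=*/0, index, source, /*alpha=*/2);
  std::cout << result << "\n";  // rows 0 and 2 become 2, row 1 stays 0
  // In-place form, matching the aten::index_add_ schema.
  self.index_add_(0, index, source);
  return 0;
}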
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..43e0ea7ac6ecfbfaf788b7f1480c3084c8a5aeb0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) { + return at::_ops::index_copy_out::call(self, dim, index, source, out); +} +// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) { + return at::_ops::index_copy_out::call(self, dim, index, source, out); +} + +// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor +inline at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) { + return at::_ops::index_copy::call(self, dim, index, source); +} + +// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor +inline at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) { + return at::_ops::index_copy_dimname::call(self, dim, index, source); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8840e6f40e210e20efc16443bc127ae901101f5a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
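index_copy is the overwriting counterpart of index_add: the selected rows are replaced by `source` rather than accumulated. A minimal sketch of the functional and out variants declared above, assuming a linked libtorch/ATen build:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor self = at::zeros({3, 2});
  at::Tensor index = at::arange(0, 3, 2);   // rows 0 and 2
  at::Tensor source = at::ones({2, 2});
  at::Tensor copied = at::index_copy(self, /*dim=*/0, index, source);
  // Out-variant writes into an existing tensor instead of allocating.
  at::Tensor out = at::empty_like(self);
  at::index_copy_out(out, self, 0, index, source);
  std::cout << copied.equal(out) << "\n";  // 1
  return 0;
}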
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a12333ade3b0ffd94d8705486b9fdcc1f71928a7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor & index_copy_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..587861ac0f1f9aad782d6f3abae6bf70ea60e1c8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
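The structured_index_copy meta struct in the header that follows (like structured_index_add earlier) returns a `precompute_out` record whose setter is guarded by a static_assert, so a precomputed field such as `dim` can be assigned only once, enforced at compile time. A standalone sketch of the same set-once builder idiom, with hypothetical names:

#include <cstdint>

// Hypothetical re-creation of the generated set-once builder: the bool
// template parameter tracks, at compile time, whether `dim` has been set.
template <bool DIM = false>
struct precomputed {
  precomputed<true> set_dim(int64_t value) {
    static_assert(DIM == false, "dim already set");
    precomputed<true> ret;
    ret.dim = value;
    return ret;
  }
  int64_t dim;
};

int main() {
  auto p = precomputed<>().set_dim(3);  // first assignment compiles
  // p.set_dim(4);  // would trip the static_assert: "dim already set"
  return static_cast<int>(p.dim) - 3;   // exits 0
}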
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); +TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6092003dd6d727ee6338ccd7f02646d9316a10a5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); +TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..529c00e6ec058f4c931281dab1b9779340f2b81c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_meta.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_index_copy : public at::impl::MetaBase { + + template <bool DIM = false> + struct TORCH_API precompute_out { + + precompute_out<true> set_dim(int64_t value) { + static_assert(DIM == false, "dim already set"); + precompute_out<true> ret; +ret.dim = value; +return ret; + } + + int64_t dim; + }; + using meta_return_ty = precompute_out <true>; + meta_return_ty meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +}; + +} // namespace native +} // namespace at diff --git
a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f3c852ed8eb41ef9a3d24690b0d41a599430fa45 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); +TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ce24aaa6138f596585c84a99fc7b5aaecee1586d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_index_copy_out : public at::meta::structured_index_copy { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Tensor & out); +}; +TORCH_API at::Tensor & index_copy_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..779e2e437d7d4f19dfcecb41b3b5620beed3c1d1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_ops.h @@ -0,0 +1,73 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_copy_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_copy"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); +}; + +struct TORCH_API index_copy_ { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_copy_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +}; + +struct TORCH_API index_copy { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_copy"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +}; + +struct TORCH_API index_copy__dimname { + using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_copy_"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "index_copy_.dimname(Tensor(a!) 
self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +}; + +struct TORCH_API index_copy_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_copy"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..33b5003030f46842945bf85ea5f9948d9b25ab73 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ffc46538c69526890151db27afd927707c87d8d8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill.h new file mode 100644 index 0000000000000000000000000000000000000000..ac9558167a1bc1e9353c50750c874d8763039d59 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill.h @@ -0,0 +1,64 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_int_Scalar::call(self, dim, index, value); +} + +// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_int_Tensor::call(self, dim, index, value); +} + +// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_Dimname_Scalar::call(self, dim, index, value); +} + +// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor +inline at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_Dimname_Tensor::call(self, dim, index, value); +} + +// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_int_Scalar_out::call(self, dim, index, value, out); +} +// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) { + return at::_ops::index_fill_int_Scalar_out::call(self, dim, index, value, out); +} + +// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_int_Tensor_out::call(self, dim, index, value, out); +} +// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) { + return at::_ops::index_fill_int_Tensor_out::call(self, dim, index, value, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f94f5aa1224f5cc024b55afb159d164432a53856 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..67d83db26f9da694c4ce49025dfe38448a971d8d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c5611cf35a5b1625c2ecfb77c119f204346cd915 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8da068c69cda4236c7ddfe482473834fd9c9ef4b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
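The four functional `index_fill` overloads above (int or Dimname `dim`, Scalar or Tensor `value`) are disambiguated purely by C++ overload resolution, while the cpu/cuda dispatch headers expose only the in-place kernels. A small usage sketch (illustrative only, assuming libtorch is linked; note the Tensor-valued overload requires a 0-dim value tensor):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::zeros({3, 4});
  at::Tensor rows = at::arange(0, 3, 2, at::kLong);     // rows {0, 2}
  // int_Scalar overload, out-of-place:
  at::Tensor filled = at::index_fill(t, 0, rows, 7.0);
  // int_Tensor overload, in-place; value must be 0-dimensional:
  t.index_fill_(0, rows, at::scalar_tensor(7.0));
  return 0;
}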
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..edf36d849dd34827357ff07afc5c00348c748de7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_meta_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f5f020fa571bd5385f623408075ae36d1c0622d4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_int_Scalar_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor & index_fill_int_Tensor_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const 
at::Tensor & value); +TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..86959b175e8e9e1fd1c2df66f4d1cc3cf0818c13 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_ops.h @@ -0,0 +1,128 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_fill__int_Scalar { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill_"; + static constexpr const char* overload_name = "int_Scalar"; + static constexpr const char* schema_str = "index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill_int_Scalar { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "int_Scalar"; + static constexpr const char* schema_str = "index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill__int_Tensor { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill_"; + static constexpr const char* overload_name = "int_Tensor"; + static constexpr const char* schema_str = "index_fill_.int_Tensor(Tensor(a!) 
self, int dim, Tensor index, Tensor value) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill_int_Tensor { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "int_Tensor"; + static constexpr const char* schema_str = "index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill__Dimname_Scalar { + using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill_"; + static constexpr const char* overload_name = "Dimname_Scalar"; + static constexpr const char* schema_str = "index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill__Dimname_Tensor { + using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill_"; + static constexpr const char* overload_name = "Dimname_Tensor"; + static constexpr const char* schema_str = "index_fill_.Dimname_Tensor(Tensor(a!) 
self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill_Dimname_Scalar { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "Dimname_Scalar"; + static constexpr const char* schema_str = "index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill_Dimname_Tensor { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "Dimname_Tensor"; + static constexpr const char* schema_str = "index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill_int_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "int_Scalar_out"; + static constexpr const char* schema_str = "index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); +}; + +struct TORCH_API index_fill_int_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_fill"; + static constexpr const char* overload_name = "int_Tensor_out"; + static constexpr const char* schema_str = "index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..82695421457bb13b26020f29df8716a07dc92186 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_index_Tensor : public TensorIteratorBase { + + template + struct TORCH_API precompute_out { + + precompute_out set_sizes(at::DimVector value) { + static_assert(SIZES == false, "sizes already set"); + precompute_out ret; +ret.sizes = value; +ret.strides = this->strides; +return ret; + } + + + precompute_out set_strides(at::DimVector value) { + static_assert(STRIDES == false, "strides already set"); + precompute_out ret; +ret.sizes = this->sizes; +ret.strides = value; +return ret; + } + + at::DimVector sizes; +at::DimVector strides; + }; + using meta_return_ty = precompute_out ; + meta_return_ty meta(const at::Tensor & self, at::IOptTensorListRef indices); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a2ae14581517ba37e337fa89f41500694d221020 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +TORCH_API at::Tensor & index_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2b5629a5f920c731e3ed8cbcc703c83e630058c7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_index_out : public at::meta::structured_index_Tensor { +void impl(const at::Tensor & self, at::DimVector sizes, at::DimVector strides, const at::Tensor & out); +}; +TORCH_API at::Tensor quantized_index(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ac43475b4be22a3f96663051b6d26c54964e0c16 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_Tensor { + using schema = at::Tensor (const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "index.Tensor(Tensor self, Tensor?[] indices) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices); +}; + +struct TORCH_API index_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!)
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const c10::List<::std::optional> & indices, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional> & indices, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put.h new file mode 100644 index 0000000000000000000000000000000000000000..711a8bc4760c1951c49564d1da285f3688e90340 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) +inline at::Tensor & index_put_(at::Tensor & self, const c10::List<::std::optional> & indices, const at::Tensor & values, bool accumulate=false) { + return at::_ops::index_put_::call(self, indices, values, accumulate); +} + +// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor +inline at::Tensor index_put(const at::Tensor & self, const c10::List<::std::optional> & indices, const at::Tensor & values, bool accumulate=false) { + return at::_ops::index_put::call(self, indices, values, accumulate); +} + +// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_put_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional> & indices, const at::Tensor & values, bool accumulate=false) { + return at::_ops::index_put_out::call(self, indices, values, accumulate, out); +} +// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_put_outf(const at::Tensor & self, const c10::List<::std::optional> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) { + return at::_ops::index_put_out::call(self, indices, values, accumulate, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c481ad70f88fdf50129dc4f8675286bb1aa92394 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor index_put(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); +TORCH_API at::Tensor & index_put_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); +TORCH_API at::Tensor & index_put_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out); +TORCH_API at::Tensor & index_put_(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_native.h new file mode 100644 index 0000000000000000000000000000000000000000..eef12242d8e1deced44037fee9b977f59c37dff9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor index_put(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); +TORCH_API at::Tensor & index_put_out(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out); +TORCH_API at::Tensor & index_put_(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..770ff719dea081be27ff473384c0e75bc836c2be --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_put_ { + using schema = at::Tensor & (at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_put_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_put_(Tensor(a!)
self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate); +}; + +struct TORCH_API index_put { + using schema = at::Tensor (const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_put"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate); +}; + +struct TORCH_API index_put_out { + using schema = at::Tensor & (const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_put"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..3bbad8f6828c7ab5b72756e31f76c62ac32b3eba --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce_out::call(self, dim, index, source, reduce, include_self, out); +} +// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) { + return at::_ops::index_reduce_out::call(self, dim, index, source, reduce, include_self, out); +} + +// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor +inline at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e1bdada37eeca0157e39485bc18b89259cd77452 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..817635950ce5369895f887983a91bf17e933e453 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); +TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3f0b9444b30a113e3d8df143d751a21797d6283b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); +TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..cfb2deccc3ab35b40b79988516213e3befe46559 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_meta.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_index_reduce : public at::impl::MetaBase { + + template <bool DIM = false> + struct TORCH_API precompute_out { + + precompute_out<true> set_dim(int64_t value) { + static_assert(DIM == false, "dim already set"); + precompute_out<true> ret; +ret.dim = value; +return ret; + } + + int64_t dim; + }; + using meta_return_ty = precompute_out<true>; + meta_return_ty meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); +}; + +} // namespace meta
} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3a00a88d2ae924fec00dff4a255dd40aa4fe1912 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
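`index_reduce` gathers `source` rows into `self` along `dim` and combines colliding writes with the named reduction ("prod", "mean", "amax" or "amin"); with the default `include_self=true`, `self`'s original entry participates in the reduction as well. A minimal sketch (illustrative only, assuming libtorch is linked):

#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::ones({4});
  at::Tensor index = at::zeros({2}, at::kLong);       // both source rows land in slot 0
  at::Tensor source = at::arange(2, at::kFloat) + 2;  // {2.0, 3.0}
  at::Tensor r = at::index_reduce(self, 0, index, source, "prod");
  // include_self=true folds self[0] == 1 into the product: r[0] == 1 * 2 * 3 == 6.
  return 0;
}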
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); +TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3e074e1bbba4d104f52e478611eb19d0ce0f65ec --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_index_reduce_cpu_out : public at::meta::structured_index_reduce { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, const at::Tensor & out); +}; +struct TORCH_API structured_index_reduce_cuda_out : public at::meta::structured_index_reduce { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..307a77abcfbdd5dde569c2cb03bf32ad40d5bab7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_reduce_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_reduce"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); +}; + +struct TORCH_API index_reduce_ { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_reduce_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); +}; + +struct TORCH_API index_reduce { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_reduce"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select.h new file mode 100644 index 0000000000000000000000000000000000000000..2d483a35bc24bb602c6cceca84b9e874157d08c0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// 
aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_out::call(self, dim, index, out); +} +// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) { + return at::_ops::index_select_out::call(self, dim, index, out); +} + +// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor +inline at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select::call(self, dim, index); +} + +// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index) { + return at::_ops::index_select_dimname_out::call(self, dim, index, out); +} +// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_select_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) { + return at::_ops::index_select_dimname_out::call(self, dim, index, out); +} + +// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor +inline at::Tensor index_select(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) { + return at::_ops::index_select_dimname::call(self, dim, index); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..da6823fe2b2f2d927984eb601220a468bc9add50 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward.h @@ -0,0 +1,48 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor +inline at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_backward::call(grad, c10::fromIntArrayRefSlow(self_sizes), dim, index); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_backward::call(grad, c10::fromIntArrayRefSlow(self_sizes), dim, index); + } +} + +// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor +inline at::Tensor index_select_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_backward::call(grad, self_sizes, dim, index); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor index_select_backward(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + return
at::_ops::index_select_backward::call(grad, self_sizes, dim, index); + } +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cc81d1b303dc5d807548ea9dc781504388ed5960 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor index_select_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..aaa014c73fe97f251f4d804791a36d262f0bae0e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor index_select_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b4dc6d3142191d9c4c87e1d62e9935a5950bc85a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
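`index_select` gathers whole slices along one dimension, and the `index_select_backward` op declared above is its autograd companion: it scatter-adds the incoming gradient back into a zeros tensor of the original `self_sizes`. A forward-pass sketch (illustrative only, assuming libtorch is linked):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::arange(12, at::kFloat).reshape({3, 4});
  at::Tensor rows = at::arange(0, 3, 2, at::kLong);  // rows {0, 2}
  at::Tensor picked = at::index_select(t, 0, rows);  // shape {2, 4}
  return 0;
}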
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_select_backward { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, int64_t, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_select_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor"; + static at::Tensor call(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ac1dcb68bfe88a267b5014a6268e988d2234b421 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor index_select(const at::Tensor & self, at::Dimname dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..771318844ccbb86af2de4dbbf4226a3450314e72 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c405d9d27346b2a7e37aea59f5b782c788f5cb5b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b1f088b550fa4d21ed9ec0e4f05cdaea3e8d3db8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor index_select_cpu_(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out_cpu_(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); +TORCH_API at::Tensor index_select_cuda(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out_cuda(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); +TORCH_API at::Tensor index_select_sparse_cpu(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor index_select_sparse_cuda(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor index_select_quantized_cpu_(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor index_select_quantized_cuda(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor 
index_select(const at::Tensor & self, at::Dimname dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f9fb7c2e08703809b1e0ccfd1a2bd7e9ae334306 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_select_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_select"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); +}; + +struct TORCH_API index_select { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_select"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "index_select(Tensor self, int dim, Tensor index) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index); +}; + +struct TORCH_API index_select_dimname_out { + using schema = at::Tensor & (const at::Tensor &, at::Dimname, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_select"; + static constexpr const char* overload_name = "dimname_out"; + static constexpr const char* schema_str = "index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out); +}; + +struct TORCH_API index_select_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::index_select"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices.h new file mode 100644 index 0000000000000000000000000000000000000000..d510aa3bbc3bb21982ec5060aa5fc52371856000 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bc50a92282a4c60aa605ad05da778b2c9bceef4e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor indices(const at::Tensor & self); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..e408aa0c7d26816b8bc4006fae60cd05816c8dcb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::indices_copy(Tensor self) -> Tensor +inline at::Tensor indices_copy(const at::Tensor & self) { + return at::_ops::indices_copy::call(self); +} + +// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & indices_copy_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::indices_copy_out::call(self, out); +} +// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & indices_copy_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::indices_copy_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..da83e3f3323c418188451db8e403a7d2ce4f8855 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
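// ---- Usage sketch (illustrative, not from the generated sources), assuming a
// sparse-enabled build: `indices_copy` returns an owning copy of a sparse COO
// tensor's index matrix, unlike the aliasing `indices()` accessor.
#include <ATen/ATen.h>
void demo_indices_copy() {
  at::Tensor i = at::tensor({0, 1}, at::kLong).reshape({1, 2});  // 1 sparse dim, 2 nnz
  at::Tensor v = at::tensor({3.0f, 4.0f});
  at::Tensor s = at::sparse_coo_tensor(i, v, {2});
  at::Tensor idx = at::indices_copy(s);  // detached copy of the indices
}
// ---- end sketch ----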
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & indices_copy_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & indices_copy_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..40131cf854de239d045455a93d6dcbe2ef5c2f9a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor indices_copy(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bed217c98a239c43426a42200be2ad04e1d5e003 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & indices_copy_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor indices_copy(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c67320fc0446ac64d117206d81d0e2e907c766b5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API indices_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::indices_copy"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "indices_copy(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API indices_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::indices_copy"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3eab0abbbd5b8dda76bf8041936a0029f3c33d1a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor indices_default(const at::Tensor & self); +TORCH_API at::Tensor indices_sparse(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..aac6130f7de92d26031416216c06bda59895f013 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/indices_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
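// ---- Usage sketch (illustrative, not from the generated sources): the schema
// `indices(Tensor(a) self) -> Tensor(a)` marks the result as an alias. It is
// exposed as a Tensor method and expects a coalesced sparse tensor.
#include <ATen/ATen.h>
void demo_indices(const at::Tensor& sparse) {
  at::Tensor c = sparse.coalesce();   // indices() throws on uncoalesced inputs
  at::Tensor idx = c.indices();       // Tensor(a): aliases, does not copy
}
// ---- end sketch ----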
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API indices { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::indices"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "indices(Tensor(a) self) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..78eb30a31957c134de24b5e6f88b6be65782e836 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor +inline at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self) { + return at::_ops::infinitely_differentiable_gelu_backward::call(grad, self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..871c17558c0f6df3a8ff83aa71ed7d92ba2d48e2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7e7a728b084ea226738d46ae10f8061b5799a2d4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c3cc3313eaa21a31b79054c17b90f8753b4bb56e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
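// ---- Editorial note (hedged reading of the op name and schema, not from the
// generated sources): for gelu(x) = x * Phi(x), d/dx gelu(x) = Phi(x) + x * phi(x)
// with Phi/phi the standard normal CDF/PDF, so this backward computes
// grad * (Phi(x) + x * phi(x)). Thin wrapper sketch; `demo_gelu_backward` is hypothetical.
#include <ATen/ATen.h>
at::Tensor demo_gelu_backward(const at::Tensor& grad, const at::Tensor& x) {
  return at::infinitely_differentiable_gelu_backward(grad, x);
}
// ---- end sketch ----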
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API infinitely_differentiable_gelu_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::infinitely_differentiable_gelu_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & grad, const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner.h new file mode 100644 index 0000000000000000000000000000000000000000..5222acf2a31b89d866059272cea4f133f1e03711 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::inner(Tensor self, Tensor other) -> Tensor +inline at::Tensor inner(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::inner::call(self, other); +} + +// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & inner_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::inner_out::call(self, other, out); +} +// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & inner_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::inner_out::call(self, other, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..64a0abc863fd95e0dc9fb86d5db508d039c4d547 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
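// ---- Usage sketch (illustrative, not from the generated sources): `inner`
// contracts the last dimension of both arguments (a dot product for 1-D inputs);
// the out-variant mirrors the convention seen throughout this diff.
#include <ATen/ATen.h>
void demo_inner() {
  at::Tensor a = at::rand({2, 3});
  at::Tensor b = at::rand({4, 3});
  at::Tensor y = at::inner(a, b);     // contracts the last dims -> shape (2, 4)
  at::Tensor out = at::empty({2, 4});
  at::inner_out(out, a, b);           // writes the same values into `out`
}
// ---- end sketch ----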
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor inner(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & inner_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & inner_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a4343decb14b38c7d3b44043df77318367a6c0ca --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor inner(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & inner_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..694cdefc506b2dd35a6f54b27be2e4c2f8e4e027 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API inner { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::inner"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "inner(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API inner_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::inner"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "inner.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..2e90ec2bfea3a54baf1f535f252efec28d237898 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor +inline at::Tensor instance_norm(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) { + return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c31900a681b5ac51d5378d0c895118ccc4683618 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor instance_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..426f3b785c90c6e24ed93f841bd2e81f050cab66 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor instance_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e15d24d6f4895f3cf203ea293ded0e765cfb2ab5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/instance_norm_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API instance_norm { + using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool, double, double, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::instance_norm"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor"; + static at::Tensor call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr.h new file mode 100644 index 0000000000000000000000000000000000000000..51a6f71f9bb2eee94610bf427e99ede652d1218f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::int_repr(Tensor self) -> Tensor +inline at::Tensor int_repr(const at::Tensor & self) { + return at::_ops::int_repr::call(self); +} + +// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & int_repr_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::int_repr_out::call(self, out); +} +// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & int_repr_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::int_repr_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7404a372626b2999e9a273ed241a1cea2521a2d9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
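// ---- Usage sketch (illustrative, not from the generated sources), assuming a
// build with quantization: `int_repr` exposes the raw integer storage of a
// quantized tensor.
#include <ATen/ATen.h>
void demo_int_repr() {
  at::Tensor x = at::rand({4});
  at::Tensor q = at::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/0, at::kQInt8);
  at::Tensor raw = at::int_repr(q);  // kChar tensor holding the stored int8 values
}
// ---- end sketch ----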
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & int_repr_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & int_repr_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4f7b48a27c104aa4e6b279edf222fbb0fb54ed2d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & int_repr_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor int_repr_quantized_cpu(const at::Tensor & self); +TORCH_API at::Tensor int_repr_quantized_cuda(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c85660578c842d0b216996361769aea236157781 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/int_repr_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API int_repr { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::int_repr"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "int_repr(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API int_repr_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::int_repr"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "int_repr.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse.h new file mode 100644 index 0000000000000000000000000000000000000000..c8677511a1de34216eef80d245b5433686ab7072 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::inverse(Tensor self) -> Tensor +inline at::Tensor inverse(const at::Tensor & self) { + return at::_ops::inverse::call(self); +} + +// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & inverse_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::inverse_out::call(self, out); +} +// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & inverse_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::inverse_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..980f66cf28024104e8c2973e3d5d758d998ad5c8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor inverse(const at::Tensor & self); +TORCH_API at::Tensor & inverse_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & inverse_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5cb926c8ce00859568771cc93d920d687bb5bf6f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor inverse(const at::Tensor & self); +TORCH_API at::Tensor & inverse_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7da8726ac113f6d2f7548a6456d3dd4ff32f8328 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API inverse { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::inverse"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "inverse(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API inverse_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::inverse"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "inverse.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced.h new file mode 100644 index 0000000000000000000000000000000000000000..738dcb8afb9f0b563da7be2265b3458e6b94e235 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..34c78ad9f09d6207cc6d03eb39f10534cd9076d2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API bool is_coalesced(const at::Tensor & self); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced_native.h new file mode 100644 index 0000000000000000000000000000000000000000..319f5bd5214d820ed7a840538d60d9df735ad870 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_coalesced_default(const at::Tensor & self); +TORCH_API bool is_coalesced_sparse(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0d57762bc5e9b29ac132aea3bf9baaf1d494e3f2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_coalesced { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_coalesced"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_coalesced(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..ade957e20245146bc58b2db71dc5b8e9a650306e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_complex(Tensor self) -> bool +inline bool __dispatch_is_complex(const at::Tensor & self) { + return at::_ops::is_complex::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..97d19a2aed49393d57d11d14f60d5be91f139293 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
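// ---- Usage sketch (illustrative, not from the generated sources): `is_complex`
// is a dtype check and ignores the tensor's values; the `__dispatch_` prefix on
// the wrapper above suggests the public entry point is hand-written.
#include <ATen/ATen.h>
void demo_is_complex() {
  at::Tensor r = at::rand({2});
  at::Tensor c = at::rand({2}, at::kComplexFloat);
  bool a = r.is_complex();  // false
  bool b = c.is_complex();  // true
  (void)a; (void)b;
}
// ---- end sketch ----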
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_complex(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e33b9448064fb71f5fa690a480b4049c8b010e80 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_complex(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5ae511553d20ea6bb85b5df078732822ba883cfb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_complex { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_complex"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_complex(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj.h new file mode 100644 index 0000000000000000000000000000000000000000..945bce695b7499b25037d7fd9e1cfef3139de044 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_conj(Tensor self) -> bool +inline bool __dispatch_is_conj(const at::Tensor & self) { + return at::_ops::is_conj::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..360e209a38b494a62fb6dedff9bfab1ccadeda6d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_conj(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5e568d603352ba758941614431cff4493f24fdae --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_conj(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8a06e7534ff63db13bb7d5eb67a3bf6aa2552f80 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_conj { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_conj"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_conj(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed.h new file mode 100644 index 0000000000000000000000000000000000000000..49ff468bf057fe4ebce8e6994abd2d43f820e633 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_distributed(Tensor self) -> bool +inline bool is_distributed(const at::Tensor & self) { + return at::_ops::is_distributed::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..36819d9c3ec11f5cde2da7c112da46b92fdfd0a6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_distributed(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d65f725d7c1fca6a25c2cc0a5a5355ef677dd5f4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_distributed(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1b4e6029c66bafa1c280121486fdf6bc099ea305 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_distributed { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_distributed"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_distributed(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point.h new file mode 100644 index 0000000000000000000000000000000000000000..59585eed4ab0e21e93393daae49a0c6a5b4f0e26 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_floating_point(Tensor self) -> bool +inline bool __dispatch_is_floating_point(const at::Tensor & self) { + return at::_ops::is_floating_point::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d039f3aed74723577c5334cb6ac7f4816b23cc86 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_floating_point(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point_native.h new file mode 100644 index 0000000000000000000000000000000000000000..89f8e48b8ae7fd1981d72eca3ba71e58c7fbbabb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_floating_point(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f2a2b9fc5cb8f6708172ffd456d593631aaf78f4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_floating_point_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_floating_point { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_floating_point"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_floating_point(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference.h new file mode 100644 index 0000000000000000000000000000000000000000..0260fce5872e785375d811b2ca2937731248e326 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_inference(Tensor self) -> bool +inline bool __dispatch_is_inference(const at::Tensor & self) { + return at::_ops::is_inference::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..aef9b50ca1ad3cd122240094412b6d1fe5c84ea6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_inference(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e99679e4214c48cdcf38d883f9d693ba073487e1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_inference(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c458eb569437dc40f7946ce9c7a7b55f5ccde02d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_inference { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_inference"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_inference(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf.h new file mode 100644 index 0000000000000000000000000000000000000000..7aa4bc56ca202df2b715040c0bdfc9c84277050b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fed78e5b4eb6316eaecd5f05cd15ad0e70476c2f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_leaf(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fe540d99ddb30049061435630867619755bf6a21 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_leaf(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..fe77d943af4d35a2273a4ce78d3302ad74730afe --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_leaf { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_leaf"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_leaf(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg.h new file mode 100644 index 0000000000000000000000000000000000000000..dc4b658282a59a7f580af0828451d9d31e12e6e1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_neg(Tensor self) -> bool +inline bool __dispatch_is_neg(const at::Tensor & self) { + return at::_ops::is_neg::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0a88b448c4c8ddbc0676aa839e532d2016f13c1d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_neg(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4a077f5f85d1536ec5453799882069821fb6cdbf --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_neg(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..87c0f50aaef0c644e0d14efe1e6a71bea38171c2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_neg { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_neg"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_neg(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero.h new file mode 100644 index 0000000000000000000000000000000000000000..fa4b50fbf012ceb0ce06f6983abda46c26d90b7c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_nonzero(Tensor self) -> bool +inline bool is_nonzero(const at::Tensor & self) { + return at::_ops::is_nonzero::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero_compositeimplicitautograd_dispatch.h new file mode 100644 index 
0000000000000000000000000000000000000000..091fa5cdb037e375c8ca7a16ca9681ba9be5bd88 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_nonzero(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero_native.h new file mode 100644 index 0000000000000000000000000000000000000000..be8fa4290eefba8c0b7c5d8e6a43a305c15fdb6d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_nonzero(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4d99959ea07de90505d10754ed70f4fb6a00bad5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_nonzero_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
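// ----------------------------------------------------------------------------
// [Editor's sketch - not part of the generated headers.] aten::is_nonzero
// (declared above) reduces a single-element tensor to its boolean truth value
// and raises an error for tensors with more than one element. A minimal
// sketch, assuming a standard libtorch build:
#include <ATen/ATen.h>
#include <iostream>

int main() {
  std::cout << std::boolalpha
            << at::is_nonzero(at::ones({1})) << '\n'    // true
            << at::is_nonzero(at::zeros({1})) << '\n';  // false
  // at::is_nonzero(at::ones({2})) would throw: the boolean value of a
  // multi-element tensor is ambiguous.
  return 0;
}
// ----------------------------------------------------------------------------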
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_nonzero { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_nonzero"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_nonzero(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned.h new file mode 100644 index 0000000000000000000000000000000000000000..a6bbbcddcc9097470bac17eae7ec2a2b1a5ee46e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fb0152474ec8c70f7e5e5d21980ec6f84b71172c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API bool is_pinned(const at::Tensor & self, ::std::optional<at::Device> device=::std::nullopt); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_native.h new file mode 100644 index 0000000000000000000000000000000000000000..09905031415aedcf7538584d738d16e69361d57f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_pinned(const at::Tensor & self, ::std::optional<at::Device> device=::std::nullopt); +TORCH_API bool is_pinned_sparse_coo(const at::Tensor & self, ::std::optional<at::Device> device=::std::nullopt); +TORCH_API bool is_pinned_sparse_compressed(const at::Tensor & self, ::std::optional<at::Device> device=::std::nullopt); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..89381466b1ec174b38ca63df98b7d9e886c32081 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_pinned { + using schema = bool (const at::Tensor &, ::std::optional<at::Device>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_pinned"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_pinned(Tensor self, Device? 
device=None) -> bool"; + static bool call(const at::Tensor & self, ::std::optional device); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional device); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size.h new file mode 100644 index 0000000000000000000000000000000000000000..9e34c987789d8985fecb51eaa6d5287d68bca440 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_same_size(Tensor self, Tensor other) -> bool +inline bool is_same_size(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::is_same_size::call(self, other); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1c7b8206515af1375770ce99df6ac17821cc3dd7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API bool is_same_size(const at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_native.h new file mode 100644 index 0000000000000000000000000000000000000000..07648c4416ba8aa4fa385104792ecc1036e6588e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_same_size(const at::Tensor & self, const at::Tensor & other); +TORCH_API bool nested_is_same_size(const at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..61b418996fff20c97ad9c484ef82f8cb43db8183 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_same_size { + using schema = bool (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_same_size"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_same_size(Tensor self, Tensor other) -> bool"; + static bool call(const at::Tensor & self, const at::Tensor & other); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to.h new file mode 100644 index 0000000000000000000000000000000000000000..2e1605ed4e79faebd1e70937efa59e6772ee4da1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e06a867a3f36a53486cbb1c50d2a9a72ab62924d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API bool is_set_to(const at::Tensor & self, const at::Tensor & tensor); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8385c5190105b13d293b910f29f08a0dd5fd3502 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API bool is_set_to(const at::Tensor & self, const at::Tensor & tensor); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b74140e2f6123209704af82bcb206b5464d5c42d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_set_to(const at::Tensor & self, const at::Tensor & tensor); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7a52ea7b72d84bbd33e4a35be40ff6771c58cb11 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_set_to { + using schema = bool (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_set_to"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_set_to(Tensor self, Tensor tensor) -> bool"; + static bool call(const at::Tensor & self, const at::Tensor & tensor); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed.h new file mode 100644 index 0000000000000000000000000000000000000000..c80e39a321bbf4227799f151becb2ed064904c9e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_signed(Tensor self) -> bool +inline bool __dispatch_is_signed(const at::Tensor & self) { + return at::_ops::is_signed::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9a7b4b971e028ae74fbd9d27886021a63f2a1393 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
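// ----------------------------------------------------------------------------
// [Editor's sketch - not part of the generated headers.] aten::is_set_to
// (declared above) reports whether two tensors point at exactly the same
// memory: same storage, storage offset, sizes, and strides. A minimal sketch,
// assuming a standard libtorch build:
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::ones({2, 3});
  at::Tensor b = at::empty({0});
  b.set_(a);  // b now aliases a's storage with a's sizes and strides
  std::cout << std::boolalpha
            << b.is_set_to(a) << '\n'           // true
            << a.clone().is_set_to(a) << '\n';  // false: clone() allocates fresh storage
  return 0;
}
// ----------------------------------------------------------------------------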
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_signed(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6da929626c9c6cc500145b66530db89a1940a73c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_signed(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6ee010b5e1c4ed84039798fa762c1306ffdd31eb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_signed { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_signed"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_signed(Tensor self) -> bool"; + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available.h new file mode 100644 index 0000000000000000000000000000000000000000..e008577753b03b1ba4073becd6a00acca0763951 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_vulkan_available() -> bool +inline bool is_vulkan_available() { + return at::_ops::is_vulkan_available::call(); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..67645c74f872ed505d143f35e6b67929eb133236 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_vulkan_available(); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_native.h new file mode 100644 index 0000000000000000000000000000000000000000..66c546844affec0fc71b273d3eacacc7f212febd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_vulkan_available(); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..37c4904a7290e307028e9f02c35d7cd501c3c073 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_vulkan_available_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_vulkan_available { + using schema = bool (); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::is_vulkan_available"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "is_vulkan_available() -> bool"; + static bool call(); + static bool redispatch(c10::DispatchKeySet dispatchKeySet); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose.h new file mode 100644 index 0000000000000000000000000000000000000000..77c85286ba4ff7c65bf4f8559891ad1e293a1c9f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor +inline at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) { + return at::_ops::isclose::call(self, other, rtol, atol, equal_nan); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..eb7f4052ee537e17340b969829ddd20e3a4ab4c4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_native.h new file mode 100644 index 0000000000000000000000000000000000000000..259f8f81ddd805e9c81f389e2e048046b1584d9e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d0f5f2c82b887c0126c8b65637dc00c7b6ff0007 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API isclose { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, double, double, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isclose"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite.h new file mode 100644 index 0000000000000000000000000000000000000000..a59cceda3e3d7770777cd28c8a2db9fe8b6a0a77 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isfinite(Tensor self) -> Tensor +inline at::Tensor isfinite(const at::Tensor & self) { + return at::_ops::isfinite::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..def73445aa606f253e255ab9ef8d1208835d20af --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
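// Illustrative sketch (not part of the generated header): at::isfinite,
// declared above, returns a bool tensor that is true where an element is
// neither NaN nor +/-infinity. Assumes <ATen/ATen.h>; values are hypothetical.
//
//   at::Tensor x = at::ones({3}) / at::zeros({3});   // [inf, inf, inf]
//   at::Tensor ok = at::isfinite(x);                 // [false, false, false]
//   // For any x: at::isfinite(x) == ~(at::isnan(x) | at::isinf(x))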
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isfinite(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b87b9e436833dac28984de6b91f73b7205398d89 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor isfinite(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d500117837699fc49fd9e916f1a326cc0d0261cd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isfinite { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isfinite"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isfinite(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin.h new file mode 100644 index 0000000000000000000000000000000000000000..a6b5d92404327da21ce6c7ec323e4e8064a37cc5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin.h @@ -0,0 +1,68 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Tensor_out::call(elements, test_elements, assume_unique, invert, out); +} +// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Tensor_Tensor_out::call(elements, test_elements, assume_unique, invert, out); +} + +// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor +inline at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Tensor::call(elements, test_elements, assume_unique, invert); +} + +// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Scalar_out::call(elements, test_element, assume_unique, invert, out); +} +// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Tensor_Scalar_out::call(elements, test_element, assume_unique, invert, out); +} + +// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor +inline at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Scalar::call(elements, test_element, assume_unique, invert); +} + +// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Scalar_Tensor_out::call(element, test_elements, assume_unique, invert, out); +} +// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Scalar_Tensor_out::call(element, test_elements, assume_unique, invert, out); +} + +// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor +inline at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Scalar_Tensor::call(element, test_elements, assume_unique, invert); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fcbf2a10d38a8181774357e3d3d92e2bb508f8c9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bd02933656f4ecfdb3a19f02cc78303158f93ef1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_cpu_dispatch.h @@ -0,0 +1,31 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
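// Illustrative sketch (not part of the generated header): usage of the three
// isin overloads declared above. Assumes <ATen/ATen.h>; tensor contents are
// hypothetical.
//
//   at::Tensor elements = at::arange(6);                 // [0,1,2,3,4,5]
//   at::Tensor test     = at::arange(0, 6, 2);           // [0,2,4]
//   at::Tensor mask = at::isin(elements, test);          // true at even values
//   at::Tensor kept = at::isin(elements, test,
//                              /*assume_unique=*/true,   // caller promises unique inputs
//                              /*invert=*/true);         // membership negated
//   at::Tensor one  = at::isin(elements, 3);             // Tensor_Scalar overload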
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..78ff202871c5ee01025a2783a0a48283b00090a9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_cuda_dispatch.h @@ -0,0 +1,31 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
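// Illustrative note (not part of the generated header): the at::cpu namespace
// above (and at::cuda just below) exposes the backend kernel directly, skipping
// the dispatcher. A sketch, assuming the inputs already live on the matching
// device:
//
//   at::Tensor mask = at::cpu::isin(elements, test);   // CPU kernel, no dispatch
//
// The portable spelling remains at::isin, which routes through the dispatcher.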
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..7f3f48579bb51cdb081ce01fa784c8ff16ac02c6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_meta.h @@ -0,0 +1,37 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_isin_Tensor_Tensor : public at::impl::MetaBase { + + + void meta(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert); +}; +struct TORCH_API structured_isin_Tensor_Scalar : public at::impl::MetaBase { + + + void meta(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert); +}; +struct TORCH_API structured_isin_Scalar_Tensor : public at::impl::MetaBase { + + + void meta(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d5a9cf70fbbca31cdd7689a7fe6f4dced9f32b8b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_meta_dispatch.h @@ -0,0 +1,31 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator 
signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); +TORCH_API at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false); +TORCH_API at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_native.h new file mode 100644 index 0000000000000000000000000000000000000000..645acda773cb2c0aaa60bb1878783ec33408afb2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_isin_Tensor_Tensor_out : public at::meta::structured_isin_Tensor_Tensor { +void impl(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, const at::Tensor & out); +}; +struct TORCH_API structured_isin_Tensor_Scalar_out : public at::meta::structured_isin_Tensor_Scalar { +void impl(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, const at::Tensor & out); +}; +struct TORCH_API structured_isin_Scalar_Tensor_out : public at::meta::structured_isin_Scalar_Tensor { +void impl(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..73647333b7cee4b8db81e1e0ef18c73692945bf7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by 
torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isin_Tensor_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Tensor_Tensor_out"; + static constexpr const char* schema_str = "isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +}; + +struct TORCH_API isin_Tensor_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Tensor_Tensor"; + static constexpr const char* schema_str = "isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"; + static at::Tensor call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert); +}; + +struct TORCH_API isin_Tensor_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Tensor_Scalar_out"; + static constexpr const char* schema_str = "isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); +}; + +struct TORCH_API isin_Tensor_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Tensor_Scalar"; + static constexpr const char* schema_str = "isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor"; + static at::Tensor call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert); +}; + +struct TORCH_API isin_Scalar_Tensor_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Scalar_Tensor_out"; + static constexpr const char* schema_str = "isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +}; + +struct TORCH_API isin_Scalar_Tensor { + using schema = at::Tensor (const at::Scalar &, const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isin"; + static constexpr const char* overload_name = "Scalar_Tensor"; + static constexpr const char* schema_str = "isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"; + static at::Tensor call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isinf.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isinf.h new file mode 100644 index 0000000000000000000000000000000000000000..a03d4755230b31a8e36d56b7e261216192f28857 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isinf.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isinf(Tensor self) -> Tensor +inline 
at::Tensor isinf(const at::Tensor & self) { + return at::_ops::isinf::call(self); +} + +// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isinf_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::isinf_out::call(self, out); +} +// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isinf_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::isinf_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isinf_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isinf_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..573d0afbce5fcc21f8f8c4b985ab6ab4c55f657c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isinf_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor isinf(const at::Tensor & self); +TORCH_API at::Tensor & isinf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isinf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isinf_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isinf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3b5a381f9789169e47b5e3b8ba50c8392d32c281 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isinf_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor isinf(const at::Tensor & self); +TORCH_API at::Tensor & isinf_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor NestedTensor_isinf(const at::Tensor & self); +TORCH_API at::Tensor isinf_sparse(const at::Tensor & self); +TORCH_API at::Tensor isinf_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor isinf_sparse_meta(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan.h new file mode 100644 index 0000000000000000000000000000000000000000..8532df93e89eccedfb29d0cff90c56946a9b35e3 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isnan(Tensor self) -> Tensor +inline at::Tensor isnan(const at::Tensor & self) { + return at::_ops::isnan::call(self); +} + +// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isnan_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::isnan_out::call(self, out); +} +// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isnan_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::isnan_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d6f6d12ce72dc94ab37559c899704fe5cb68d44f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & isnan_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isnan_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a99e27ace3e434034132c76bb96b962f81749a6f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor isnan(const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b31ac1b5353a6c1a60cfc6f5ac2e50b5f2266e5b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor isnan(const at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_native.h new file mode 100644 index 0000000000000000000000000000000000000000..dfc36c34a4abdba03c40c3685b067083937e95e1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & isnan_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor isnan(const at::Tensor & self); +TORCH_API at::Tensor NestedTensor_isnan(const at::Tensor & self); +TORCH_API at::Tensor isnan_sparse(const at::Tensor & self); +TORCH_API at::Tensor isnan_sparse_csr(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..77d43462949e0a484ebb9217560b073cc87a8abb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
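// Illustrative sketch (not part of the generated header): the functional and
// out= forms of isnan declared earlier. Assumes <ATen/ATen.h>; names are
// hypothetical.
//
//   at::Tensor x = at::zeros({3}) / at::zeros({3});   // 0/0 -> NaN
//   at::Tensor mask = at::isnan(x);                   // [true, true, true]
//   at::Tensor out = at::empty({3}, at::kBool);
//   at::isnan_out(out, x);                            // writes into `out`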
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API isnan { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isnan"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isnan(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API isnan_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isnan"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf.h new file mode 100644 index 0000000000000000000000000000000000000000..cf8758dd1dda5a73e4652ceb92d2878de13a5cff --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isneginf(Tensor self) -> Tensor +inline at::Tensor isneginf(const at::Tensor & self) { + return at::_ops::isneginf::call(self); +} + +// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::isneginf_out::call(self, out); +} +// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::isneginf_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..20f281fbfcf6a7223b75628baf6a9938d5cc192d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor isneginf(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ee95465e0ff7d2100d0c4ee50262e48127785e7c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor isneginf(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9b007238b4049fd710b0fb0037fa6f17954227c8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor isneginf(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..9e5995ca4bf670b3baeace14f8895aa0b089c337 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_isneginf : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..529d9ee2f8f1c19066bd1d104e2887e49aa11593 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
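// Illustrative sketch (not part of the generated header): isneginf (declared
// above) and isposinf (declared further below) split isinf by sign. Assumes
// <ATen/ATen.h> and <limits>; values are hypothetical.
//
//   float inf = std::numeric_limits<float>::infinity();
//   at::Tensor x = at::cat({at::full({1}, -inf), at::full({1}, inf), at::zeros({1})});
//   at::isneginf(x);   // [true,  false, false]
//   at::isposinf(x);   // [false, true,  false]
//   // For any x: at::isinf(x) == (at::isneginf(x) | at::isposinf(x))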
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor isneginf(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..62cb78b646300d90cd75928887187b1f0842a192 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_isneginf_out : public at::meta::structured_isneginf { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor NestedTensor_isneginf(const at::Tensor & self); +TORCH_API at::Tensor isneginf_sparse(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor isneginf_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bfc0274c586d729868007c5c9526c7a15fc71573 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isneginf { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isneginf"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isneginf(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API isneginf_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isneginf"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "isneginf.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf.h new file mode 100644 index 0000000000000000000000000000000000000000..091745805ce032abbe41177dd0317853160e3112 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isposinf(Tensor self) -> Tensor +inline at::Tensor isposinf(const at::Tensor & self) { + return at::_ops::isposinf::call(self); +} + +// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::isposinf_out::call(self, out); +} +// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::isposinf_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..80523e781050ef36805acbfbd675dcaffa7c26c8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor isposinf(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ef274924c4e4bb2bfbf5eeb0bc781e4eacae7ea0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor isposinf(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5e8ad855dda3d589b9df3ec6d86da5d65220b07c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor isposinf(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..2f1614f9cf4b236c1eea299de19afc3117f2f38e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_isposinf : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b40d121d4e8baff929717269d4d145fab1f748cf --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
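// Illustrative note (not part of the generated header): structured_isposinf
// above follows the structured-kernels pattern — meta(...) validates inputs and
// fixes the output shape/dtype, while the backend impl(...) (declared in the
// *_native.h header below) fills the result. The out= wrapper may resize its
// argument, so a sketch like
//
//   at::Tensor out = at::empty({0}, at::kBool);   // resized to x's shape by the op
//   at::isposinf_out(out, x);
//
// is valid even when `out` starts empty.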
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor isposinf(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6d16fb503e09a5c5d99fa3a561dd859c13dbea2c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_isposinf_out : public at::meta::structured_isposinf { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor NestedTensor_isposinf(const at::Tensor & self); +TORCH_API at::Tensor isposinf_sparse(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor isposinf_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a442540d2afec71a6397f2db9e6c2bead4e2ae67 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isposinf { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isposinf"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isposinf(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API isposinf_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isposinf"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "isposinf.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal.h new file mode 100644 index 0000000000000000000000000000000000000000..a969fbc6230acbe6f295911c30522d8b69c20a22 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isreal(Tensor self) -> Tensor +inline at::Tensor isreal(const at::Tensor & self) { + return at::_ops::isreal::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a4e7e3a6ae918f437a65a7efa32ebca8ad052dd9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
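[Editor's aside: a minimal sketch of the aten::isreal operator declared above, which returns true where the imaginary part is zero. Illustrative only, not part of the diff; assumes libtorch, and uses at::complex to build a complex input.]

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor re = at::tensor({1.0f, 0.0f});
  at::Tensor im = at::tensor({0.0f, 2.0f});
  at::Tensor z = at::complex(re, im);       // complex64 tensor [1+0j, 0+2j]
  std::cout << at::isreal(z) << "\n";       // [true, false]
  // For non-complex dtypes, isreal is trivially all-true.
  std::cout << at::isreal(re) << "\n";      // [true, true]
  return 0;
}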
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isreal(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7d3de4e56e6e85bb25307be8a20a95c3062c524b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor isreal(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f23295e28a479519795f8df635b8fd40d2d0b663 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isreal { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::isreal"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "isreal(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft.h new file mode 100644 index 0000000000000000000000000000000000000000..3cda83886b3970904906c7cc53887298b67061c5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? 
length=None, bool return_complex=False) -> Tensor +inline at::Tensor istft(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length=::std::nullopt, ::std::optional<int64_t> win_length=::std::nullopt, const ::std::optional<at::Tensor> & window={}, bool center=true, bool normalized=false, ::std::optional<bool> onesided=::std::nullopt, ::std::optional<int64_t> length=::std::nullopt, bool return_complex=false) { + return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cfd3090e53a8d0cad5c4e8e8a8d390ba961f1b41 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor istft(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length=::std::nullopt, ::std::optional<int64_t> win_length=::std::nullopt, const ::std::optional<at::Tensor> & window={}, bool center=true, bool normalized=false, ::std::optional<bool> onesided=::std::nullopt, ::std::optional<int64_t> length=::std::nullopt, bool return_complex=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ecf46156327f428c5e5aee6db0d32d0fdcb56af4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor istft(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length=::std::nullopt, ::std::optional<int64_t> win_length=::std::nullopt, const ::std::optional<at::Tensor> & window={}, bool center=true, bool normalized=false, ::std::optional<bool> onesided=::std::nullopt, ::std::optional<int64_t> length=::std::nullopt, bool return_complex=false); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4dcdf957608ad9f6d6ee966fae85cac6aeae2bea --- /dev/null +++
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/istft_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API istft { + using schema = at::Tensor (const at::Tensor &, int64_t, ::std::optional<int64_t>, ::std::optional<int64_t>, const ::std::optional<at::Tensor> &, bool, bool, ::std::optional<bool>, ::std::optional<int64_t>, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::istft"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, bool normalized, ::std::optional<bool> onesided, ::std::optional<int64_t> length, bool return_complex); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, bool normalized, ::std::optional<bool> onesided, ::std::optional<int64_t> length, bool return_complex); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item.h new file mode 100644 index 0000000000000000000000000000000000000000..e035a318d6a2f400806178ada27315e8d34315b2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6648c6622ef814ee942cb359f17150185f8eac08 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
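[Editor's aside: the item headers above declare aten::item, which extracts the single value of a one-element tensor as an at::Scalar. A minimal sketch, editor-added and assuming libtorch:]

#include <ATen/ATen.h>
#include <iostream>

int main() {
  // aten::item(Tensor self) -> Scalar: requires exactly one element.
  at::Tensor t = at::arange(6).sum();   // 0-dim tensor holding 15
  at::Scalar s = t.item();              // Tensor::item() routes to aten::item
  std::cout << s.toLong() << "\n";      // 15
  // The templated convenience form converts in one step:
  double v = t.item<double>();          // 15.0
  std::cout << v << "\n";
  return 0;
}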
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Scalar item(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9b4b78782ffccc855c54dcea5f1e2bfe08150118 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Scalar item(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..38e945348e1ffeb6a5a04bc1f8b91c73f931e8aa --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/item_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API item { + using schema = at::Scalar (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::item"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "item(Tensor self) -> Scalar"; + static at::Scalar call(const at::Tensor & self); + static at::Scalar redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h new file mode 100644 index 0000000000000000000000000000000000000000..225106fe3f8d1f51c158b0a9db728eb2c3a99429 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h @@ -0,0 +1,80 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options={}) { + return at::_ops::kaiser_window::call(window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? 
layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::kaiser_window::call(window_length, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::kaiser_window_periodic::call(window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::kaiser_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options={}) { + return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length) { + return at::_ops::kaiser_window_out::call(window_length, out); +} +// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor & out) { + return at::_ops::kaiser_window_out::call(window_length, out); +} + +// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out); +} +// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out); +} + +// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic, double beta) { + return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out); +} +// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, double beta, at::Tensor & out) { + return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ae88ba420e458f5b917297a27b1f5998fd1088ae --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h @@ -0,0 +1,34 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options={}); +TORCH_API at::Tensor kaiser_window(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length); +TORCH_API at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options={}); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic); +TORCH_API at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options={}); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic, double beta); +TORCH_API at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, double beta, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_native.h new file mode 100644 index 0000000000000000000000000000000000000000..51d2108114bcb0ecfb4c173cbce6e8534817fce1 --- /dev/null +++ 
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor kaiser_window(int64_t window_length, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & kaiser_window_out(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & kaiser_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & kaiser_window_beta_out(int64_t window_length, bool periodic, double beta, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..123970b32d10cf1bd97caa6d2e8b0654e061b74d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API kaiser_window { + using schema = at::Tensor (int64_t, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API kaiser_window_periodic { + using schema = at::Tensor (int64_t, bool, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = "periodic"; + static constexpr const char* schema_str = "kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? 
device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API kaiser_window_beta { + using schema = at::Tensor (int64_t, bool, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = "beta"; + static constexpr const char* schema_str = "kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(int64_t window_length, bool periodic, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API kaiser_window_out { + using schema = at::Tensor & (int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out); +}; + +struct TORCH_API kaiser_window_periodic_out { + using schema = at::Tensor & (int64_t, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = "periodic_out"; + static constexpr const char* schema_str = "kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out); +}; + +struct TORCH_API kaiser_window_beta_out { + using schema = at::Tensor & (int64_t, bool, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kaiser_window"; + static constexpr const char* overload_name = "beta_out"; + static constexpr const char* schema_str = "kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(int64_t window_length, bool periodic, double beta, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div.h new file mode 100644 index 0000000000000000000000000000000000000000..7a811f322f374710d4754868b71031c19d793fe5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor +inline at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false) { + return at::_ops::kl_div::call(self, target, reduction, log_target); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3ba4fdf397867c01740c65d98789efa9c4c4c3f7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4530aa9be8de9d56e49b171efb22185de877f525 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..69a6e5a7c68300bd063e0c84ffa7ad5f55441342 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
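[Editor's aside: a usage note on the kl_div declarations above. The first argument is expected to hold log-probabilities; with log_target=false the target holds probabilities. A minimal sketch, editor-added and assuming libtorch:]

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor log_p = at::log_softmax(at::randn({4, 5}), /*dim=*/1);
  at::Tensor q     = at::softmax(at::randn({4, 5}), /*dim=*/1);
  // Note: Reduction::Mean averages over every element, not per batch row.
  at::Tensor loss = at::kl_div(log_p, q, at::Reduction::Mean,
                               /*log_target=*/false);
  std::cout << loss.item<double>() << "\n";
  return 0;
}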
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API kl_div { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kl_div"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron.h new file mode 100644 index 0000000000000000000000000000000000000000..b0cae7580cb28d6c424b9cf318eb2d85d4f70f47 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::kron(Tensor self, Tensor other) -> Tensor +inline at::Tensor kron(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::kron::call(self, other); +} + +// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kron_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::kron_out::call(self, other, out); +} +// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kron_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::kron_out::call(self, other, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0a7675c2860154b74817d55e3ecc269052e0c8f1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
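[Editor's aside: a minimal sketch of aten::kron and the kron.out overload declared above. Editor-added, assumes libtorch:]

#include <ATen/ATen.h>
#include <iostream>

int main() {
  // aten::kron(Tensor self, Tensor other) -> Tensor: Kronecker product.
  at::Tensor a = at::eye(2);
  at::Tensor b = at::ones({2, 2});
  at::Tensor k = at::kron(a, b);          // 4x4 block-diagonal of ones
  // The out= overload writes into an existing tensor of the right shape.
  at::Tensor out = at::empty({4, 4});
  at::kron_out(out, a, b);
  std::cout << k << "\n";
  return 0;
}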
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor kron(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & kron_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & kron_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron_native.h new file mode 100644 index 0000000000000000000000000000000000000000..100ce3c0ee001792719d7648113a9ceeeff86277 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor kron(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & kron_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..01af031a9628ce6822f4d1acb8d73f4a921b69a2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kron_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API kron { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kron"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "kron(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API kron_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kron"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "kron.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue.h new file mode 100644 index 0000000000000000000000000000000000000000..f259746213879277a0305f34d6d5847e858448c5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue.h @@ -0,0 +1,158 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::kthvalue(Tensor self, SymInt k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple kthvalue(const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) { + return at::_ops::kthvalue::call(self, k, dim, keepdim); +} +namespace symint { + template >> + ::std::tuple kthvalue(const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) { + return at::_ops::kthvalue::call(self, k, dim, keepdim); + } +} + +// aten::kthvalue(Tensor self, SymInt k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple kthvalue_symint(const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false) { + return at::_ops::kthvalue::call(self, k, dim, keepdim); +} +namespace symint { + template >> + ::std::tuple kthvalue(const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false) { + return at::_ops::kthvalue::call(self, k, dim, keepdim); + } +} + +// aten::kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) { + return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices); +} +namespace symint { + template >> + ::std::tuple kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) { + return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices); + } +} + +// aten::kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices); +} +namespace symint { + template >> + ::std::tuple kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices); + } +} + +// aten::kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) +inline ::std::tuple kthvalue_symint_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false) { + return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices); +} +namespace symint { + template >> + ::std::tuple kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false) { + return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices); + } +} + +// aten::kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple kthvalue_symint_outf(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices); +} +namespace symint { + template >> + ::std::tuple kthvalue_outf(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices); + } +} + +// aten::kthvalue.dimname(Tensor self, SymInt k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) { + return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim); +} +namespace symint { + template >> + ::std::tuple kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) { + return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim); + } +} + +// aten::kthvalue.dimname(Tensor self, SymInt k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple kthvalue_symint(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false) { + return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim); +} +namespace symint { + template >> + ::std::tuple kthvalue(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false) { + return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim); + } +} + +// aten::kthvalue.dimname_out(Tensor self, SymInt k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) { + return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices); +} +namespace symint { + template >> + ::std::tuple kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) { + return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices); + } +} + +// aten::kthvalue.dimname_out(Tensor self, SymInt k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) +inline ::std::tuple kthvalue_outf(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices); +} +namespace symint { + template >> + ::std::tuple kthvalue_outf(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices); + } +} + +// aten::kthvalue.dimname_out(Tensor self, SymInt k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple kthvalue_symint_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false) { + return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices); +} +namespace symint { + template >> + ::std::tuple kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false) { + return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices); + } +} + +// aten::kthvalue.dimname_out(Tensor self, SymInt k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple kthvalue_symint_outf(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices); +} +namespace symint { + template >> + ::std::tuple kthvalue_outf(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices); + } +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5502ba563500415f748ef2932b985c06e8334cba --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple kthvalue(const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false); +TORCH_API ::std::tuple kthvalue_symint(const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d7ef96da0cbae8549d7cd99d2f5fae67548f0955 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple kthvalue_symint(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple kthvalue_outf(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple kthvalue_symint_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple kthvalue_symint_outf(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e700b819c81b561bc1d96ec35a7421810af7debe --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
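[Editor's aside: the kthvalue headers above return the k-th smallest value along a dimension together with its index. A minimal sketch, editor-added; assumes libtorch and C++17 structured bindings:]

#include <ATen/ATen.h>
#include <iostream>
#include <tuple>

int main() {
  // aten::kthvalue(Tensor self, SymInt k, int dim=-1, bool keepdim=False)
  //   -> (Tensor values, Tensor indices)
  at::Tensor x = at::tensor({3.0f, 1.0f, 4.0f, 1.0f, 5.0f});
  auto [values, indices] = at::kthvalue(x, /*k=*/2);
  std::cout << values.item<float>() << "\n";    // 1 (the 2nd smallest)
  std::cout << indices.item<int64_t>() << "\n"; // index of that entry
  return 0;
}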
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_outf(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1b50d3c189cff75719df2e96ca19b611075381fa --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::SymInt k, int64_t dim=-1, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_symint_outf(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b8d133a1995d3b0f9cb4c8cc6f141983936023c2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out_cpu(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out_cuda(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple<at::Tensor,at::Tensor>
kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2d101ccde5f375b95a2d7020be1a37305a10091a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API kthvalue { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, c10::SymInt, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kthvalue"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "kthvalue(Tensor self, SymInt k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim); +}; + +struct TORCH_API kthvalue_values { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, c10::SymInt, int64_t, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kthvalue"; + static constexpr const char* overload_name = "values"; + static constexpr const char* schema_str = "kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!)
indices)"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +}; + +struct TORCH_API kthvalue_dimname { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, c10::SymInt, at::Dimname, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kthvalue"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "kthvalue.dimname(Tensor self, SymInt k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim); +}; + +struct TORCH_API kthvalue_dimname_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, c10::SymInt, at::Dimname, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::kthvalue"; + static constexpr const char* overload_name = "dimname_out"; + static constexpr const char* schema_str = "kthvalue.dimname_out(Tensor self, SymInt k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..337edd9a3e45c69bf6c7e0b95d35a717aa87fa69 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor +inline at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::l1_loss::call(self, target, reduction); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ffca09db099a37a84a82c0db3a16d3201fcf7d3b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by
torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6b540285268b73b31ab1381747d04406c37a6073 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..46c62ecf4e60412f8cae8fc2a093b40b282078a7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
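To make the generated kthvalue and l1_loss declarations above concrete, here is a minimal usage sketch against the public ATen C++ API; it assumes a linked LibTorch build, and the tensor names are illustrative only, not part of these headers:

    #include <ATen/ATen.h>

    void kthvalue_l1_loss_demo() {
      at::Tensor t = at::randn({3, 5});
      // kthvalue returns a (values, indices) pair; here the 2nd-smallest
      // element along dim 1, keeping the reduced dimension.
      auto [values, indices] = at::kthvalue(t, /*k=*/2, /*dim=*/1, /*keepdim=*/true);
      // l1_loss defaults to reduction=at::Reduction::Mean, i.e. mean absolute error.
      at::Tensor loss = at::l1_loss(at::randn({4}), at::randn({4}));
    }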
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API l1_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::l1_loss"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, int64_t reduction); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..b601356f5f0f36de535e80912d9f5ba750b1f0f5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm.h @@ -0,0 +1,48 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor +inline at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) { + return at::_ops::layer_norm::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, cudnn_enable); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>> + at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) { + return at::_ops::layer_norm::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, cudnn_enable); + } +} + +// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor?
bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor +inline at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) { + return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>> + at::Tensor layer_norm(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) { + return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable); + } +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..891c6fd1167c45295ed71aefdecc6e7d324f129b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
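The two inline families in layer_norm.h above differ only in whether normalized_shape is a concrete at::IntArrayRef or a symbolic c10::SymIntArrayRef; both forward to the same at::_ops::layer_norm::call. A hedged sketch of the plain entry point (tensor names illustrative):

    #include <ATen/ATen.h>

    void layer_norm_demo() {
      at::Tensor x = at::randn({2, 4, 8});
      // Normalize over the trailing dimension; weight and bias are optional
      // (defaulting to empty), eps defaults to 1e-05.
      at::Tensor y = at::layer_norm(x, /*normalized_shape=*/{8}, at::ones({8}), at::zeros({8}));
    }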
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true); +TORCH_API at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0d8c18df53f28a9522d64149809751240744ef4e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..13f06f1a625b11b32a058dde03b433093f2b6a67 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API layer_norm { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, double, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::layer_norm"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor?
bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor"; + static at::Tensor call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enable); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enable); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm.h new file mode 100644 index 0000000000000000000000000000000000000000..e20ccccf2f6a862fc6938de2d2a82c2a23e8a949 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lcm_out::call(self, other, out); +} +// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::lcm_out::call(self, other, out); +} + +// aten::lcm(Tensor self, Tensor other) -> Tensor +inline at::Tensor lcm(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lcm::call(self, other); +} + +// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other) { + return at::_ops::lcm_::call(self, other); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6b4f61bade0489e636484d8c4e6498528984f45a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
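lcm, declared in lcm.h above, is a structured binary op on integer tensors, generated in functional, out=, and in-place forms that all share one kernel. An illustrative sketch (the at::tensor factory and names are assumptions of a standard LibTorch setup):

    #include <ATen/ATen.h>

    void lcm_demo() {
      at::Tensor a = at::tensor({4, 6, 10});
      at::Tensor b = at::tensor({6, 15, 25});
      at::Tensor c = at::lcm(a, b);      // functional form: {12, 30, 50}
      at::Tensor out = at::empty_like(c);
      at::lcm_out(out, a, b);            // out= variant writes into `out`
      a.lcm_(b);                         // in-place method variant
    }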
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3e584ba74bba0d7f11b7de0e3b04fd2c6da0ce7f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..afe9b21d06f08f6ec6b7d27ffcc6579799866b33 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..596967a89bc676f6b3423e96673af1a6c246af06 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_lcm : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0082471d2ec99184c21f874f7bfcff3e0d6c9867 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
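The at::meta:: declarations in the dispatch header just below run only the shape/dtype half of the structured kernel. One observable consequence is support for the meta device, where tensors carry sizes and dtypes but no storage; a hedged sketch:

    #include <ATen/ATen.h>

    void lcm_meta_demo() {
      auto opts = at::TensorOptions().dtype(at::kLong).device(at::kMeta);
      at::Tensor a = at::empty({3, 1}, opts);
      at::Tensor b = at::empty({1, 4}, opts);
      // No data is allocated or touched: only the broadcast shape {3, 4}
      // and the result dtype are computed by the meta function.
      at::Tensor c = at::lcm(a, b);
    }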
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d9c89638aea2f895f98f8c98c279749c44eb8112 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_lcm_out : public at::meta::structured_lcm { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c75e18b225c4936257561dd12a0fa55121d664f7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lcm_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lcm"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "lcm.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API lcm { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lcm"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lcm(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API lcm_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lcm_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp.h new file mode 100644 index 0000000000000000000000000000000000000000..303844505864a55f51718417bf34699c3ed8e6ad --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor ldexp(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ldexp_Tensor::call(self, other); +} + +// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & ldexp_(at::Tensor & self, const at::Tensor & other) { + return at::_ops::ldexp_::call(self, other); +} + +// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ldexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ldexp_out::call(self, other, out); +} +// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & ldexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::ldexp_out::call(self, other, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c1f0147a898d1a46574cef155f91ce3ef2d84367 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor ldexp(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ldexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ldexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & ldexp_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..891b61c04b509ec572bb6db1b2c912a5ec966837 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor ldexp(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ldexp_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & ldexp_(at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f143b78be9e6e7800f2719ac9c300eb7f8564015 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/ldexp_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
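ldexp, declared in ldexp.h above, computes self * 2**other elementwise, which is why only Tensor-Tensor variants are generated (the exponent is itself a tensor). A brief sketch (names illustrative):

    #include <ATen/ATen.h>

    void ldexp_demo() {
      at::Tensor mantissa = at::tensor({0.5, 0.5, 0.5});
      at::Tensor exponent = at::tensor({1, 2, 3});
      at::Tensor r = at::ldexp(mantissa, exponent);  // {1.0, 2.0, 4.0}
    }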
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API ldexp_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::ldexp"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "ldexp.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API ldexp_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::ldexp_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API ldexp_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::ldexp"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le.h new file mode 100644 index 0000000000000000000000000000000000000000..67568b6d98dd1a04684ee594f48177278269f29d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::le_Scalar_out::call(self, other, out); +} +// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::le_Scalar_out::call(self, other, out); +} + +// aten::le.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor le(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::le_Scalar::call(self, other); +} + +// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::le_Tensor_out::call(self, other, out); +} +// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::le_Tensor_out::call(self, other, out); +} + +// aten::le.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor le(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::le_Tensor::call(self, other); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..99b0f6716cedbd169834f261cafd4382d460d38b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cd6bc8263231e1bcabd586b128f3f31c160cc9fc --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_cpu_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
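Like the other comparison ops, le (declared in le.h above) comes in Scalar and Tensor overloads, each with functional, out=, and in-place forms, and produces a boolean tensor. Sketch:

    #include <ATen/ATen.h>

    void le_demo() {
      at::Tensor t = at::tensor({1, 5, 3});
      at::Tensor m1 = at::le(t, 3);                      // Scalar overload: {true, false, true}
      at::Tensor m2 = at::le(t, at::tensor({2, 5, 2}));  // Tensor overload: {true, true, false}
    }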
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..447d73bd4d2726489ba9dc5ad9cdb053330c09cb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_cuda_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..fc1c087128143c22493310c206ba64dd7d401947 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_meta.h @@ -0,0 +1,32 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_le_Scalar : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Scalar & other); +}; +struct TORCH_API structured_le_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace meta
} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1db66e031270da765232ce8d5baa2158bf73e19c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_meta_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6aeb8347ceafd4803fa3490bc32f86ad48c50b14 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_le_Scalar_out : public at::meta::structured_le_Scalar { +void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out); +}; +TORCH_API at::Tensor le_quantized_cpu(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +struct TORCH_API structured_le_Tensor_out : public at::meta::structured_le_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor le_quantized_cpu(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e49f21b0db04fca68dbb025dcea8a9bd1eb08258 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/le_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
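The name/overload_name/schema_str members of the _ops structs below are what the dispatcher uses to locate an operator at runtime; the same registration can also be reached dynamically, as in this hedged sketch:

    #include <ATen/ATen.h>
    #include <ATen/core/dispatch/Dispatcher.h>

    at::Tensor le_via_dispatcher(const at::Tensor & a, const at::Tensor & b) {
      // Matches le_Tensor below: name "aten::le", overload "Tensor".
      auto handle = c10::Dispatcher::singleton().findSchemaOrThrow("aten::le", "Tensor");
      // typed() checks the requested signature against the registered schema.
      return handle.typed<at::Tensor (const at::Tensor &, const at::Tensor &)>().call(a, b);
    }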
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API le_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API le_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "le.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API le_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API le_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "le.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API le__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "le_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API le__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::le_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu.h new file mode 100644 index 0000000000000000000000000000000000000000..269261a80741489190464453f9b5773937de1121 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu_out::call(self, negative_slope, out); +} +// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) { + return at::_ops::leaky_relu_out::call(self, negative_slope, out); +} + +// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor +inline at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu::call(self, negative_slope); +} + +// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) +inline at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu_::call(self, negative_slope); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..12e1f60a9559d347b49dbf465318f38f915e8fc1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input); +} +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) { + return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input); +} + +// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor +inline at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8bd257b124e91c289c6103716780a26926147928 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1ff72fcfcbd7399a369e7d286071fb0c4ae7fa89 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3adc307190db636f1019c168d0c65b244780feca --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
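For the forward op in leaky_relu.h above: positive inputs pass through unchanged, while negative inputs are scaled by negative_slope (default 0.01). Sketch:

    #include <ATen/ATen.h>

    void leaky_relu_demo() {
      at::Tensor x = at::tensor({-2.0, 0.0, 3.0});
      at::Tensor y = at::leaky_relu(x);            // {-0.02, 0.0, 3.0}
      at::leaky_relu_(x, /*negative_slope=*/0.1);  // in-place: {-0.2, 0.0, 3.0}
    }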
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..df400dd17d5ca7ddff6d62ef4dd5f2f50b2166cf --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_leaky_relu_backward : public TensorIteratorBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +}; + +} // namespace meta
} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..14e3fac8110b6794b296607b16382d98a2d80e87 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
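leaky_relu_backward is normally reached through autograd rather than called by hand; self_is_result records whether `self` is actually the forward output (true when the forward ran in-place), which is only valid for non-negative slopes. A hedged sketch, assuming a full LibTorch build so autograd is available:

    #include <ATen/ATen.h>

    void leaky_relu_backward_demo() {
      at::Tensor x = at::tensor({-2.0, 3.0}).set_requires_grad(true);
      at::leaky_relu(x, 0.01).sum().backward();  // drives leaky_relu_backward internally
      // x.grad() is now {0.01, 1.0}: the slope where x < 0, identity elsewhere.
      at::Tensor gi = at::leaky_relu_backward(at::ones_like(x), x, 0.01,
                                              /*self_is_result=*/false);
    }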
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..14e3fac8110b6794b296607b16382d98a2d80e87
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result);
+TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result);
+TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..38e9a5d467f26f9550ac4790c8c76d042f0c4df5
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_leaky_relu_backward_out : public at::meta::structured_leaky_relu_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6131aec3ddd808421b4beda547e0f28031514c6c
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API leaky_relu_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::leaky_relu_backward";
+  static constexpr const char* overload_name = "grad_input";
+  static constexpr const char* schema_str = "leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input);
+};
+
+struct TORCH_API leaky_relu_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::leaky_relu_backward";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor";
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result);
+};
+
+}} // namespace at::_ops
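Each overload in an *_ops.h header becomes one struct that carries the operator's schema string plus two unboxed entry points: call (routes through the dispatcher) and redispatch (re-enters dispatch with an explicit DispatchKeySet). A hedged sketch of how these relate to the public at:: API; the direct _ops call is internal and shown only for comparison:

#include <torch/torch.h>
#include <ATen/ops/leaky_relu_backward_ops.h>

int main() {
  auto self = torch::randn({8});
  auto grad_out = torch::ones({8});

  // Public functional API declared in ATen's generated Functions.h:
  auto g1 = at::leaky_relu_backward(grad_out, self, /*negative_slope=*/0.01,
                                    /*self_is_result=*/false);

  // Equivalent direct call through the generated operator struct above:
  auto g2 = at::_ops::leaky_relu_backward::call(grad_out, self, 0.01, false);

  TORCH_CHECK(torch::equal(g1, g2));
  return 0;
}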
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3330f4dd35b5a024ab651926192530edbe68879d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01);
+TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..20aa6e98c30c69ec0a4a49c77f72bf736683803b
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01);
+TORCH_API at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01);
+TORCH_API at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out);
+TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3fbc282ab518dc9d8348d5042c79e3a6ad580cad
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01);
+TORCH_API at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01);
+TORCH_API at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out);
+TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..33d2949b06a4c6965432d36f501a7776cf5ec90b
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_leaky_relu : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Scalar & negative_slope);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bee59a660dd4b26913632120d3418a2dc4af83e3
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01);
+TORCH_API at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01);
+TORCH_API at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out);
+TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d601b61ad7a29c998b2c6e1a18a99c46af23c8f2
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_leaky_relu_out : public at::meta::structured_leaky_relu {
+void impl(const at::Tensor & self, const at::Scalar & negative_slope, const at::Tensor & out);
+};
+TORCH_API at::Tensor leaky_relu_quantized_cpu(const at::Tensor & self, const at::Scalar & negative_slope=0.01);
+TORCH_API at::Tensor & leaky_relu_out_quantized_cpu(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out);
+TORCH_API at::Tensor & leaky_relu_quantized_cpu_(at::Tensor & self, const at::Scalar & negative_slope=0.01);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c40e51efe06212e8ae8b4bf8f362ef5664589049
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_ops.h
@@ -0,0 +1,51 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API leaky_relu_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::leaky_relu";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out);
+};
+
+struct TORCH_API leaky_relu {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::leaky_relu";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & negative_slope);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope);
+};
+
+struct TORCH_API leaky_relu_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::leaky_relu_";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & negative_slope);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & negative_slope);
+};
+
+}} // namespace at::_ops
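The per-dispatch-key headers above (at::cpu::, at::cuda::, at::meta::) expose the kernel registered for one backend directly, skipping the dispatcher. That is only safe when the input already lives on that backend; a hedged sketch for the CPU case, assuming the per-op header is available on the include path:

#include <torch/torch.h>
#include <ATen/ops/leaky_relu_cpu_dispatch.h>

int main() {
  auto x = torch::randn({4});
  auto y_dispatched = at::leaky_relu(x, 0.01);   // full dispatcher path
  auto y_direct = at::cpu::leaky_relu(x, 0.01);  // CPU kernel, no dispatch
  TORCH_CHECK(torch::equal(y_dispatched, y_direct));
  return 0;
}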
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ce79949d3fa6ef41dbdf93b3789122f3fdb7165
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
+    return at::_ops::lerp_Scalar_out::call(self, end, weight, out);
+}
+// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) {
+    return at::_ops::lerp_Scalar_out::call(self, end, weight, out);
+}
+
+// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
+    return at::_ops::lerp_Tensor_out::call(self, end, weight, out);
+}
+// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) {
+    return at::_ops::lerp_Tensor_out::call(self, end, weight, out);
+}
+
+// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
+inline at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
+    return at::_ops::lerp_Scalar::call(self, end, weight);
+}
+
+// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
+inline at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
+    return at::_ops::lerp_Tensor::call(self, end, weight);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c236873f881c152b00dd9fe191329df719486f0a
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..309e0224b5f46cd2ea57d6591ad8fee0d8a3b620
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_cpu_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4444ea869374a42f7347c6a30ef6509177313fd6
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_cuda_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed874fa524c4c13e72260de827516465c7811cf3
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_meta.h
@@ -0,0 +1,32 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_lerp_Scalar : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+};
+struct TORCH_API structured_lerp_Tensor : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+};
+
+} // namespace meta
+} // namespace at
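The meta declarations above are the shape/dtype half of the structured-kernel split: meta() validates inputs and decides the output without touching data, and the per-backend impl() fills it in. One way to observe that split from the public API is to run the op on meta-device tensors, which allocate no storage; a hedged sketch (the device constant and behavior assume a reasonably recent libtorch):

#include <torch/torch.h>
#include <iostream>

int main() {
  auto opts = torch::TensorOptions().device(at::kMeta);
  auto a = torch::empty({128, 1}, opts);
  auto b = torch::empty({1, 64}, opts);
  auto out = at::lerp(a, b, 0.5);    // only the meta() step runs: no data
  std::cout << out.sizes() << "\n";  // broadcast shape, expected [128, 64]
  return 0;
}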
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a320092df1fdf8c7bf3eef483d5feddf1b83063c
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_meta_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a480dd318e0f7cc35bac01493150d9700c3f5c49
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_lerp_Scalar : public at::meta::structured_lerp_Scalar {
+void impl(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, const at::Tensor & out);
+};
+struct TORCH_API structured_lerp_Tensor : public at::meta::structured_lerp_Tensor {
+void impl(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9d74af1a0a27e7b583583a63691228c20a08ca7e
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_ops.h
@@ -0,0 +1,84 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API lerp__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lerp_";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+};
+
+struct TORCH_API lerp__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lerp_";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+};
+
+struct TORCH_API lerp_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lerp";
+  static constexpr const char* overload_name = "Scalar_out";
+  static constexpr const char* schema_str = "lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out);
+};
+
+struct TORCH_API lerp_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lerp";
+  static constexpr const char* overload_name = "Tensor_out";
+  static constexpr const char* schema_str = "lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out);
+};
+
+struct TORCH_API lerp_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lerp";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+};
+
+struct TORCH_API lerp_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lerp";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+};
+
+}} // namespace at::_ops
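lerp computes self + weight * (end - self), with the weight either a Scalar or a broadcastable Tensor. A quick hedged check of the functional, out=, and in-place variants declared above against the formula:

#include <torch/torch.h>

int main() {
  auto start = torch::zeros({3});
  auto end = torch::full({3}, 10.0);

  auto a = at::lerp(start, end, 0.25);      // functional, Scalar weight
  auto out = torch::empty({3});
  at::lerp_out(out, start, end, 0.25);      // out= variant (out comes first)
  auto b = start.clone().lerp_(end, 0.25);  // in-place variant

  auto manual = start + 0.25 * (end - start);
  TORCH_CHECK(torch::allclose(a, manual));
  TORCH_CHECK(torch::allclose(out, manual));
  TORCH_CHECK(torch::allclose(b, manual));
  return 0;
}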
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less.h
new file mode 100644
index 0000000000000000000000000000000000000000..dcb83287b294b5cb3aa645123c205036d59d456f
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::less_Scalar_out::call(self, other, out);
+}
+// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & less_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::less_Scalar_out::call(self, other, out);
+}
+
+// aten::less.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor less(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::less_Scalar::call(self, other);
+}
+
+// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::less_Tensor_out::call(self, other, out);
+}
+// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & less_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::less_Tensor_out::call(self, other, out);
+}
+
+// aten::less.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor less(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::less_Tensor::call(self, other);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1787471c9f44c97f73334aefb426695c30091f5d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor less(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & less_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & less_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor less(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & less_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & less_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal.h
new file mode 100644
index 0000000000000000000000000000000000000000..ee7a9ed0c03765dcd11b46f71e51722b842a3851
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::less_equal_Scalar_out::call(self, other, out);
+}
+// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & less_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::less_equal_Scalar_out::call(self, other, out);
+}
+
+// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::less_equal_Scalar::call(self, other);
+}
+
+// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::less_equal_Tensor_out::call(self, other, out);
+}
+// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & less_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::less_equal_Tensor_out::call(self, other, out);
+}
+
+// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::less_equal_Tensor::call(self, other);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c75199545f3c947bc3ee933f6eb1864f362f9df5
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & less_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & less_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..af875acc6ce0e5fe2520f927f41293651b2f7a6d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & less_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & less_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Tensor & other);
+} // namespace native
+} // namespace at
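less and less_equal are registered as CompositeImplicitAutograd kernels: they have no kernels of their own and simply forward to lt / le, which is why only composite dispatch headers appear for them. A hedged sketch of the equivalence:

#include <torch/torch.h>

int main() {
  auto a = torch::tensor({1, 2, 3});
  auto b = torch::tensor({2, 2, 2});

  TORCH_CHECK(torch::equal(at::less(a, b), at::lt(a, b)));  // {1,0,0}
  TORCH_CHECK(torch::equal(at::less_equal(a, b), a <= b));  // {1,1,0}
  TORCH_CHECK(at::less(a, b).dtype() == torch::kBool);
  return 0;
}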
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..900a04c1c1542ef8332f168ed99af4219a7374d9
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_ops.h
@@ -0,0 +1,84 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API less_equal_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less_equal";
+  static constexpr const char* overload_name = "Scalar_out";
+  static constexpr const char* schema_str = "less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+struct TORCH_API less_equal_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less_equal";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "less_equal.Scalar(Tensor self, Scalar other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API less_equal_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less_equal";
+  static constexpr const char* overload_name = "Tensor_out";
+  static constexpr const char* schema_str = "less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API less_equal_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less_equal";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "less_equal.Tensor(Tensor self, Tensor other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API less_equal__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less_equal_";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API less_equal__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less_equal_";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..910b068c156c301ee0da36ddcc39cfb435cb59a2
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor less(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & less_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & less_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor less(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & less_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & less_(at::Tensor & self, const at::Tensor & other);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b9bd8c4b8b28b0e30d9ef4fa6ae044f4323eaa6
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_ops.h
@@ -0,0 +1,84 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API less_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less";
+  static constexpr const char* overload_name = "Scalar_out";
+  static constexpr const char* schema_str = "less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+struct TORCH_API less_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "less.Scalar(Tensor self, Scalar other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API less_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less";
+  static constexpr const char* overload_name = "Tensor_out";
+  static constexpr const char* schema_str = "less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API less_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "less.Tensor(Tensor self, Tensor other) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API less__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less_";
+  static constexpr const char* overload_name = "Scalar";
+  static constexpr const char* schema_str = "less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API less__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::less_";
+  static constexpr const char* overload_name = "Tensor";
+  static constexpr const char* schema_str = "less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)";
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
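Note the Tensor(a!) annotation on the in-place overloads above: less_ / less_equal_ mutate self rather than returning a fresh Bool tensor, so the 0/1 results are written back in self's original dtype. A hedged sketch of the difference:

#include <torch/torch.h>

int main() {
  auto a = torch::tensor({1.0f, 3.0f});
  auto b = torch::tensor({2.0f, 2.0f});

  auto c = at::less(a, b);     // new tensor, dtype Bool
  TORCH_CHECK(c.dtype() == torch::kBool);

  a.less_(b);                  // in-place: a becomes {1.0f, 0.0f}
  TORCH_CHECK(a.dtype() == torch::kFloat);
  TORCH_CHECK(torch::equal(a, torch::tensor({1.0f, 0.0f})));
  return 0;
}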
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor lgamma(const at::Tensor & self); +TORCH_API at::Tensor & lgamma_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9a0f37286cf8a364a7ce77c54dfd2e9dc4352b8e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor lgamma(const at::Tensor & self); +TORCH_API at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & lgamma_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5895ca6f884253d6d07434b43e8276e7baf979a1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor lgamma(const at::Tensor & self); +TORCH_API at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & lgamma_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..6bf9f3eab9db90c4008a86b97cd69ac22de5f87f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_lgamma : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fd8a163427a662d384011b48a4c77e0c4117e125 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
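`structured_lgamma::meta` above computes output metadata (sizes, dtype) only; one observable consequence is that the operator runs on meta tensors without allocating or touching data. An editorial sketch (assumes the Meta dispatch key, which stock PyTorch builds provide):

    #include <ATen/ATen.h>

    void lgamma_meta_shapes() {
      // Meta tensors carry shape/dtype only; no storage is ever allocated.
      at::Tensor m = at::empty({8, 3}, at::TensorOptions().device(at::kMeta));
      at::Tensor r = at::lgamma(m);   // executes only the meta() shape function
      // r.sizes() == {8, 3}; r is itself a meta tensor
    }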
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor lgamma(const at::Tensor & self); +TORCH_API at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & lgamma_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_native.h new file mode 100644 index 0000000000000000000000000000000000000000..905b45032fa81c80281859e086b23ee7d3c4e4a5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_lgamma_out : public at::meta::structured_lgamma { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..64d349f5882c5394c2d2763df84a8c338604ab79 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lgamma_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lgamma"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API lgamma_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lgamma_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lgamma_(Tensor(a!) 
self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API lgamma { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lgamma"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lgamma(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift.h new file mode 100644 index 0000000000000000000000000000000000000000..059af89fd5017815b6313d60528538a9db8129cd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lift(Tensor self) -> Tensor +inline at::Tensor lift(const at::Tensor & self) { + return at::_ops::lift::call(self); +} + +// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lift_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::lift_out::call(self, out); +} +// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lift_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::lift_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..abd012fd391dc43e1847a657cc9b6f62f7a95512 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
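Each `at::_ops::*` struct above couples the operator's schema string with typed `call`/`redispatch` entry points; `name` and `overload_name` are exactly the keys under which the operator is registered with the dispatcher. A sketch of the lookup (editorial example, not generated code):

    #include <ATen/core/dispatch/Dispatcher.h>
    #include <ATen/ops/lgamma_ops.h>

    void lgamma_schema_lookup() {
      c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow(
          at::_ops::lgamma::name,            // "aten::lgamma"
          at::_ops::lgamma::overload_name);  // "" for the base overload
      // at::lgamma(x) is sugar for at::_ops::lgamma::call(x), which resolves
      // through this same registration.
    }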
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor lift(const at::Tensor & self); +TORCH_API at::Tensor & lift_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lift_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh.h new file mode 100644 index 0000000000000000000000000000000000000000..221d46b12e2d0c96edc3acc4ddae6d68537aefd9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lift_fresh(Tensor(a) self) -> Tensor(a) +inline at::Tensor lift_fresh(const at::Tensor & self) { + return at::_ops::lift_fresh::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b1b2d992dd4d2ba5e188e0ba13210bce05efdbe0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor lift_fresh(const at::Tensor & self); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..b876387b0d85eed5c5bc5bce3b24e560b9775ba5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lift_fresh_copy(Tensor self) -> Tensor +inline at::Tensor lift_fresh_copy(const at::Tensor & self) { + return at::_ops::lift_fresh_copy::call(self); +} + +// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & lift_fresh_copy_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::lift_fresh_copy_out::call(self, out); +} +// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lift_fresh_copy_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::lift_fresh_copy_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..20957f2a9243d04e0679c7bf5fd4ffe443570ac8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & lift_fresh_copy_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lift_fresh_copy_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7903efd51345cce8adc6ff4af99bfda9969d9a96 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
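The `lift` family is plumbing for functionalization and tracing rather than an end-user API; judging from the schemas above, `lift_fresh` may alias its input (`Tensor(a) -> Tensor(a)`), while `lift_fresh_copy` always materializes a new tensor. A hedged sketch:

    #include <ATen/ATen.h>

    void lift_sketch() {
      at::Tensor t = at::ones({2, 2});
      at::Tensor a = at::lift_fresh(t);       // may alias t
      at::Tensor b = at::lift_fresh_copy(t);  // guaranteed fresh copy
      b.add_(1);                              // does not write through to t
    }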
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor lift_fresh_copy(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c58192db2d2ef29a8200f38d5fb2cf5820474e1f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & lift_fresh_copy_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor lift_fresh_copy(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0a37a8ee7ab9e65356b22bef823f0c6e1cc3b8bc --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lift_fresh_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lift_fresh_copy"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lift_fresh_copy(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API lift_fresh_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lift_fresh_copy"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "lift_fresh_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_native.h new file mode 100644 index 0000000000000000000000000000000000000000..35d6590600571e0d9a9b1b21dc4a6fd701715a8c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor lift_fresh(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ba81e729afdcfdaa988f7a7b0e7a3b1f7866e18d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API lift_fresh { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lift_fresh"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lift_fresh(Tensor(a) self) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_native.h new file mode 100644 index 0000000000000000000000000000000000000000..84d23a48424f2e894dc64073818591325e0f1c22 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor lift(const at::Tensor & self); +TORCH_API at::Tensor & lift_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d6c2af84d35bc15dda84c5b17609ab15e9bf9b10 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lift { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lift"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lift(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API lift_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lift"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "lift.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky.h new file mode 100644 index 0000000000000000000000000000000000000000..c4885025304df115b16417709f36479a245eb27c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor +inline at::Tensor linalg_cholesky(const at::Tensor & self, bool upper=false) { + return at::_ops::linalg_cholesky::call(self, upper); +} + +// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper=false) { + return at::_ops::linalg_cholesky_out::call(self, upper, out); +} +// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out) { + return at::_ops::linalg_cholesky_out::call(self, upper, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8078540fd9c25c3778b4e63ac557775b2a16fd56 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
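A usage sketch for the `linalg_cholesky` wrappers above (editorial; the input must be Hermitian positive-definite, so the example constructs one):

    #include <ATen/ATen.h>

    void cholesky_example() {
      at::Tensor A = at::rand({3, 3});
      at::Tensor spd = at::matmul(A, A.mT()) + 3.0 * at::eye(3);  // SPD by construction
      at::Tensor L = at::linalg_cholesky(spd);                    // lower-triangular factor
      at::Tensor U = at::linalg_cholesky(spd, /*upper=*/true);    // upper-triangular factor
    }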
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_cholesky(const at::Tensor & self, bool upper=false); +TORCH_API at::Tensor & linalg_cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper=false); +TORCH_API at::Tensor & linalg_cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..145101ac934c69b6809e15686e76934cadd48fce --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info) +inline ::std::tuple linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false) { + return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors); +} + +// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) +inline ::std::tuple linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false) { + return at::_ops::linalg_cholesky_ex_L::call(self, upper, check_errors, L, info); +} +// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) +inline ::std::tuple linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) { + return at::_ops::linalg_cholesky_ex_L::call(self, upper, check_errors, L, info); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..66b1c819c18cb3a72fcbc66d8041c662e78a80eb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
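`linalg_cholesky_ex` above is the non-throwing variant: instead of raising on a non-positive-definite input, it reports failure through the `info` tensor. An illustrative sketch:

    #include <ATen/ATen.h>

    void cholesky_ex_example() {
      at::Tensor A = at::rand({3, 3});
      at::Tensor spd = at::matmul(A, A.mT()) + 3.0 * at::eye(3);
      auto [L, info] = at::linalg_cholesky_ex(spd, /*upper=*/false, /*check_errors=*/false);
      // info == 0 on success; info == k > 0 means the leading minor of order k
      // is not positive-definite. With check_errors=true an error is thrown instead.
    }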
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..597eb6e7ceacf69e44ecb822adb50311ae0c1d7b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e8877bf4f3df907b3ff15e55f0a75eaa0279f9d7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..208cfe6d09e3833f4ec877251f41a5e7d3769316 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_cholesky_ex : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, bool upper, bool check_errors); +}; + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d1cbc3acb5c01b08257e44e60e28972a9c34a0a9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..54ec0580ffcc8711ceb043492d70e24ea25f5506 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_cholesky_ex_out : public at::meta::structured_linalg_cholesky_ex { +void impl(const at::Tensor & self, bool upper, bool check_errors, const at::Tensor & L, const at::Tensor & info); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..30f1d56ffe6fe2568e287567a0d518b08568a6d8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
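The structured `impl` above receives caller-provided `L`/`info` outputs; the preceding meta step resizes empty outputs to the right shape. A sketch of the out= form (an assumption here is that dtypes match what the kernel expects: floating point for `L`, int32 for `info`):

    #include <ATen/ATen.h>

    void cholesky_ex_out_example() {
      at::Tensor A = at::rand({3, 3});
      at::Tensor spd = at::matmul(A, A.mT()) + 3.0 * at::eye(3);
      at::Tensor L = at::empty({0});               // empty outputs get resized
      at::Tensor info = at::empty({0}, at::kInt);
      at::linalg_cholesky_ex_out(L, info, spd);
    }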
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_cholesky_ex { + using schema = ::std::tuple (const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cholesky_ex"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)"; + static ::std::tuple call(const at::Tensor & self, bool upper, bool check_errors); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors); +}; + +struct TORCH_API linalg_cholesky_ex_L { + using schema = ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cholesky_ex"; + static constexpr const char* overload_name = "L"; + static constexpr const char* schema_str = "linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)"; + static ::std::tuple call(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e0d09b256eca126d36739274cbe7ab093d2655d6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_cholesky(const at::Tensor & self, bool upper=false); +TORCH_API at::Tensor & linalg_cholesky_out(const at::Tensor & self, bool upper, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ffed9d7c419e46bf896f23eb3bd8d55c8a2b35ba --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_cholesky { + using schema = at::Tensor (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cholesky"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, bool upper); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper); +}; + +struct TORCH_API linalg_cholesky_out { + using schema = at::Tensor & (const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cholesky"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, bool upper, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond.h new file mode 100644 index 0000000000000000000000000000000000000000..4c0e212b07ac521695bdf58f8624636b1a5b63a1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor +inline at::Tensor linalg_cond(const at::Tensor & self, const ::std::optional & p=::std::nullopt) { + return at::_ops::linalg_cond::call(self, p); +} + +// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, const ::std::optional & p=::std::nullopt) { + return at::_ops::linalg_cond_out::call(self, p, out); +} +// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cond_outf(const at::Tensor & self, const ::std::optional & p, at::Tensor & out) { + return at::_ops::linalg_cond_out::call(self, p, out); +} + +// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor +inline at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p) { + return at::_ops::linalg_cond_p_str::call(self, p); +} + +// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, c10::string_view p) { + return at::_ops::linalg_cond_p_str_out::call(self, p, out); +} +// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & linalg_cond_outf(const at::Tensor & self, c10::string_view p, at::Tensor & out) { + return at::_ops::linalg_cond_p_str_out::call(self, p, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fd2bd54cad7476104a6b890e9dc8e9e7eec63400 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_cond(const at::Tensor & self, const ::std::optional & p=::std::nullopt); +TORCH_API at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, const ::std::optional & p=::std::nullopt); +TORCH_API at::Tensor & linalg_cond_outf(const at::Tensor & self, const ::std::optional & p, at::Tensor & out); +TORCH_API at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p); +TORCH_API at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, c10::string_view p); +TORCH_API at::Tensor & linalg_cond_outf(const at::Tensor & self, c10::string_view p, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_native.h new file mode 100644 index 0000000000000000000000000000000000000000..cd372423d6b7575cc2737fe6eb0f3dd14b51ed9b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_cond(const at::Tensor & self, const ::std::optional & p=::std::nullopt); +TORCH_API at::Tensor & linalg_cond_out(const at::Tensor & self, const ::std::optional & p, at::Tensor & out); +TORCH_API at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p); +TORCH_API at::Tensor & linalg_cond_out(const at::Tensor & self, c10::string_view p, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..130b7ff9af1b58431e66782e61b168f54576a775 --- /dev/null +++ 
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_cond { + using schema = at::Tensor (const at::Tensor &, const ::std::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cond"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linalg_cond(Tensor self, Scalar? p=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const ::std::optional & p); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional & p); +}; + +struct TORCH_API linalg_cond_out { + using schema = at::Tensor & (const at::Tensor &, const ::std::optional &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cond"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const ::std::optional & p, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional & p, at::Tensor & out); +}; + +struct TORCH_API linalg_cond_p_str { + using schema = at::Tensor (const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cond"; + static constexpr const char* overload_name = "p_str"; + static constexpr const char* schema_str = "linalg_cond.p_str(Tensor self, str p) -> Tensor"; + static at::Tensor call(const at::Tensor & self, c10::string_view p); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p); +}; + +struct TORCH_API linalg_cond_p_str_out { + using schema = at::Tensor & (const at::Tensor &, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linalg_cond"; + static constexpr const char* overload_name = "p_str_out"; + static constexpr const char* schema_str = "linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, c10::string_view p, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross.h new file mode 100644 index 0000000000000000000000000000000000000000..83a3797ed8ad53500a6d04e5257cc5d37b6f7e00 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor +inline at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1) { + return at::_ops::linalg_cross::call(self, other, dim); +} + +// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1) { + return at::_ops::linalg_cross_out::call(self, other, dim, out); +} +// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) { + return at::_ops::linalg_cross_out::call(self, other, dim, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7149e0c7e3711614e8157d2a6b14cd68671ce0e8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
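`linalg_cond` above has two overload families, one taking an optional Scalar order and one taking a string order, mirrored by the `p_str` schemas. An editorial sketch:

    #include <ATen/ATen.h>

    void cond_example() {
      at::Tensor A = at::rand({3, 3}) + 3.0 * at::eye(3);  // comfortably invertible
      at::Tensor c2 = at::linalg_cond(A);           // p=None -> 2-norm condition number
      at::Tensor c1 = at::linalg_cond(A, 1);        // Scalar overload (1-norm)
      at::Tensor cf = at::linalg_cond(A, "fro");    // p_str overload (Frobenius norm)
    }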
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e9b5bfc6b109269190fbd467e58a2079a55d5841 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..262603e8f6aa6a87aea8ce8e3f92e20d53ca0676 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
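A sketch for `linalg_cross` above; the dimension indexed by `dim` must have size 3, and batch dimensions broadcast:

    #include <ATen/ATen.h>

    void cross_example() {
      at::Tensor a = at::rand({4, 3});
      at::Tensor b = at::rand({1, 3});             // broadcasts against a's batch dim
      at::Tensor c = at::linalg_cross(a, b);       // default dim=-1; result is {4, 3}
    }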
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +TORCH_API at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..81b8a904cfaf4d335db4f4729a8bd9f98af652bf --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_cross : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, const at::Tensor & other, int64_t dim); +}; + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3d633f2aa6510a5b0dbc0885e0754681b125b13f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1);
+TORCH_API at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1);
+TORCH_API at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..80c989b10499d558140947cee032f01fe61582bd
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_cross_out : public at::meta::structured_linalg_cross {
+void impl(const at::Tensor & self, const at::Tensor & other, int64_t dim, const at::Tensor & out);
+};
+TORCH_API at::Tensor linalg_cross_zerotensor(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a35fe78e42d753027b401f964db4dcd9a9c53a77
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_cross {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_cross";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor";
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other, int64_t dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim);
+};
+
+struct TORCH_API linalg_cross_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_cross";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5e7542c2bef30fb40e03d61974b3718e22baf02
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_det(Tensor A) -> Tensor
+inline at::Tensor linalg_det(const at::Tensor & A) {
+    return at::_ops::linalg_det::call(A);
+}
+
+// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_det_out(at::Tensor & out, const at::Tensor & A) {
+    return at::_ops::linalg_det_out::call(A, out);
+}
+// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_det_outf(const at::Tensor & A, at::Tensor & out) {
+    return at::_ops::linalg_det_out::call(A, out);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b7ec419f0b79876a4d1e51bf319baf120fc3ce64
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
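+
+// A minimal usage sketch for at::linalg_det as declared above (illustrative
+// comment only, not generated code; assumes <ATen/ATen.h>):
+//
+//   at::Tensor A = at::randn({3, 3});
+//   at::Tensor d = at::linalg_det(A);   // 0-dim tensor holding det(A)
+//   at::Tensor out = at::empty({});
+//   at::linalg_det_out(out, A);         // out-variant writes into a preallocated tensor
+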
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_det(const at::Tensor & A);
+TORCH_API at::Tensor & linalg_det_out(at::Tensor & out, const at::Tensor & A);
+TORCH_API at::Tensor & linalg_det_outf(const at::Tensor & A, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3883b8916d6618bb3b19541a7825df61d64b7ad3
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_det(const at::Tensor & A);
+TORCH_API at::Tensor & linalg_det_out(const at::Tensor & A, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..69d9c4cdb090683b3b290fc34dbe5d7553e072a9
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_det {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_det";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_det(Tensor A) -> Tensor";
+  static at::Tensor call(const at::Tensor & A);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A);
+};
+
+struct TORCH_API linalg_det_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_det";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & A, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal.h
new file mode 100644
index 0000000000000000000000000000000000000000..418c471d52c1160b5ca8d0b7cadb4ba2bee4bf61
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal.h
@@ -0,0 +1,31 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)
+inline at::Tensor linalg_diagonal(const at::Tensor & A, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) {
+    return at::_ops::linalg_diagonal::call(A, offset, dim1, dim2);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..76359b305432ecaa7e027b8dad0ad15b0aa61194
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
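+
+// A minimal usage sketch for at::linalg_diagonal as declared above
+// (illustrative comment only, not generated code; assumes <ATen/ATen.h>):
+//
+//   at::Tensor A = at::randn({5, 4, 4});
+//   // View of the main diagonal of each 4x4 matrix in the batch; shares storage with A.
+//   at::Tensor d = at::linalg_diagonal(A, /*offset=*/0, /*dim1=*/-2, /*dim2=*/-1);
+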
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_diagonal(const at::Tensor & A, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8bc3060d9d8df137556afff371674b2b0f81d43d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_diagonal(const at::Tensor & A, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..80b154e36e69c075a645cad334a87819b8533a6d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_diagonal_ops.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_diagonal {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_diagonal";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)";
+  static at::Tensor call(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig.h
new file mode 100644
index 0000000000000000000000000000000000000000..90859bb1e4b3ddccb9b6b116a94217bb7b43e4e4
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
+inline ::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self) {
+    return at::_ops::linalg_eig::call(self);
+}
+
+// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self) {
+    return at::_ops::linalg_eig_out::call(self, eigenvalues, eigenvectors);
+}
+// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
+    return at::_ops::linalg_eig_out::call(self, eigenvalues, eigenvectors);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf6b085acadb2ac1c9422ad310abd0464845790b
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
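+
+// A minimal usage sketch for at::linalg_eig as declared above (illustrative
+// comment only, not generated code; assumes <ATen/ATen.h> and C++17):
+//
+//   at::Tensor A = at::randn({4, 4});
+//   // Eigendecomposition of a general square matrix; both results are complex.
+//   auto [eigenvalues, eigenvectors] = at::linalg_eig(A);
+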
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5b1d64d56ef10a989a2b7ebfb99dfedb104ceb3
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca81742a74135a64c5b15920d34855a9a821a074
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..171f0392a55f4b15606d4102d41a25e903dff9d5
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_eig {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_eig";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API linalg_eig_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_eig";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)";
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2065a91e88207e21a7e6c748e36391fcfbb1944
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
+inline ::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO="L") {
+    return at::_ops::linalg_eigh::call(self, UPLO);
+}
+
+// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(at::Tensor & eigvals, at::Tensor & eigvecs, const at::Tensor & self, c10::string_view UPLO="L") {
+    return at::_ops::linalg_eigh_eigvals::call(self, UPLO, eigvals, eigvecs);
+}
+// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
+    return at::_ops::linalg_eigh_eigvals::call(self, UPLO, eigvals, eigvecs);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..00ee1cf75f0bf9d2d4357eed26def19470dc8f38
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO="L");
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(at::Tensor & eigvals, at::Tensor & eigvecs, const at::Tensor & self, c10::string_view UPLO="L");
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..12513f4551a5950e98370d9b59988534ab16b111
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO="L");
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b225af998767d7af34c706816ed7c8b810659c4f
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigh_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
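+
+// A minimal usage sketch for at::linalg_eigh as declared above (illustrative
+// comment only, not generated code; assumes <ATen/ATen.h> and C++17):
+//
+//   at::Tensor A = at::randn({4, 4});
+//   at::Tensor sym = A + A.t();                        // make it symmetric
+//   // UPLO="L": only the lower triangle is read; eigenvalues come back real, ascending.
+//   auto [w, v] = at::linalg_eigh(sym, /*UPLO=*/"L");
+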
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_eigh {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, c10::string_view);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_eigh";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_eigh(Tensor self, str UPLO=\"L\") -> (Tensor eigenvalues, Tensor eigenvectors)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, c10::string_view UPLO);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO);
+};
+
+struct TORCH_API linalg_eigh_eigvals {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, c10::string_view, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_eigh";
+  static constexpr const char* overload_name = "eigvals";
+  static constexpr const char* schema_str = "linalg_eigh.eigvals(Tensor self, str UPLO=\"L\", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)";
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals.h
new file mode 100644
index 0000000000000000000000000000000000000000..7734a9433663734e3b8cbf3e689339ef077ed341
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_eigvals(Tensor self) -> Tensor
+inline at::Tensor linalg_eigvals(const at::Tensor & self) {
+    return at::_ops::linalg_eigvals::call(self);
+}
+
+// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_eigvals_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::linalg_eigvals_out::call(self, out);
+}
+// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_eigvals_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::linalg_eigvals_out::call(self, out);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3d126ead33f00cc9f886307a692d4a0e27ecdaf
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_eigvals(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e8eabbe79cd4e5198775c977d1e4ab92bfa0ea03
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
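+
+// A minimal usage sketch for at::linalg_eigvals as declared above
+// (illustrative comment only, not generated code; assumes <ATen/ATen.h>):
+//
+//   at::Tensor A = at::randn({4, 4});
+//   // Complex eigenvalues only; cheaper than at::linalg_eig when the
+//   // eigenvectors are not needed.
+//   at::Tensor w = at::linalg_eigvals(A);
+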
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & linalg_eigvals_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & linalg_eigvals_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..754d80abf4172a02c003350772771ca01b441a64
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & linalg_eigvals_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & linalg_eigvals_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e0e67196e7e99e572bfb56ca7ed2fe4020d5303a
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_eigvals(const at::Tensor & self);
+TORCH_API at::Tensor & linalg_eigvals_out(const at::Tensor & self, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..039a152344bb1fe51ce6f8674f3d8e9dcba56ee8
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_eigvals {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_eigvals";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_eigvals(Tensor self) -> Tensor";
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API linalg_eigvals_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_eigvals";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh.h
new file mode 100644
index 0000000000000000000000000000000000000000..eb34096e345a6127f7cafe7c37162a49d3980097
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
+inline at::Tensor linalg_eigvalsh(const at::Tensor & self, c10::string_view UPLO="L") {
+    return at::_ops::linalg_eigvalsh::call(self, UPLO);
+}
+
+// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_eigvalsh_out(at::Tensor & out, const at::Tensor & self, c10::string_view UPLO="L") {
+    return at::_ops::linalg_eigvalsh_out::call(self, UPLO, out);
+}
+// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_eigvalsh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
+    return at::_ops::linalg_eigvalsh_out::call(self, UPLO, out);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..38cfa189b5d2dfabc4da4b19489ccb108e8fa5ba
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_eigvalsh(const at::Tensor & self, c10::string_view UPLO="L");
+TORCH_API at::Tensor & linalg_eigvalsh_out(at::Tensor & out, const at::Tensor & self, c10::string_view UPLO="L");
+TORCH_API at::Tensor & linalg_eigvalsh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2e52a0fd339abe814f4c140090d6d4461892fcf
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_eigvalsh(const at::Tensor & self, c10::string_view UPLO="L");
+TORCH_API at::Tensor & linalg_eigvalsh_out(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9af82abdc6bc315e6e324a781738e6a447af53ad
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
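+
+// A minimal usage sketch for at::linalg_eigvalsh as declared above
+// (illustrative comment only, not generated code; assumes <ATen/ATen.h>):
+//
+//   at::Tensor A = at::randn({4, 4});
+//   // Real eigenvalues of a symmetric/Hermitian matrix, in ascending order.
+//   at::Tensor w = at::linalg_eigvalsh(A + A.t(), /*UPLO=*/"L");
+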
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_eigvalsh {
+  using schema = at::Tensor (const at::Tensor &, c10::string_view);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_eigvalsh";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_eigvalsh(Tensor self, str UPLO=\"L\") -> Tensor";
+  static at::Tensor call(const at::Tensor & self, c10::string_view UPLO);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO);
+};
+
+struct TORCH_API linalg_eigvalsh_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::string_view, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_eigvalsh";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_eigvalsh.out(Tensor self, str UPLO=\"L\", *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product.h
new file mode 100644
index 0000000000000000000000000000000000000000..847d683c6f9e36182545104076fb13f136bafa7d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor
+inline at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau) {
+    return at::_ops::linalg_householder_product::call(input, tau);
+}
+
+// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_householder_product_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tau) {
+    return at::_ops::linalg_householder_product_out::call(input, tau, out);
+}
+// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_householder_product_outf(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
+    return at::_ops::linalg_householder_product_out::call(input, tau, out);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ec9f3d689bcfe46ba0984437e4c33c91370f47a4
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau);
+TORCH_API at::Tensor & linalg_householder_product_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tau);
+TORCH_API at::Tensor & linalg_householder_product_outf(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..301705e7121f4ad6dea7871e597ac691557f1346
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
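+
+// A minimal usage sketch for at::linalg_householder_product as declared above
+// (illustrative comment only, not generated code; assumes <ATen/ATen.h> and C++17):
+//
+//   at::Tensor A = at::randn({5, 3});
+//   auto [reflectors, tau] = at::geqrf(A);
+//   // Assembles the explicit Q factor of the QR decomposition from the
+//   // Householder reflectors and scalars produced by geqrf (LAPACK ?orgqr).
+//   at::Tensor Q = at::linalg_householder_product(reflectors, tau);
+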
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau);
+TORCH_API at::Tensor & linalg_householder_product_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tau);
+TORCH_API at::Tensor & linalg_householder_product_outf(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b6001eead1c2f9332621d5ad2e0d381a42778f44
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau);
+TORCH_API at::Tensor & linalg_householder_product_out(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d0c0670841d7d6f57d906285435b8df7dc00c37
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_householder_product {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_householder_product";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_householder_product(Tensor input, Tensor tau) -> Tensor";
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & tau);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau);
+};
+
+struct TORCH_API linalg_householder_product_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_householder_product";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv.h
new file mode 100644
index 0000000000000000000000000000000000000000..6ba3c09b1f2b73c138680eb8ab22752c9b8d4945
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_inv(Tensor A) -> Tensor
+inline at::Tensor linalg_inv(const at::Tensor & A) {
+    return at::_ops::linalg_inv::call(A);
+}
+
+// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_inv_out(at::Tensor & out, const at::Tensor & A) {
+    return at::_ops::linalg_inv_out::call(A, out);
+}
+// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_inv_outf(const at::Tensor & A, at::Tensor & out) {
+    return at::_ops::linalg_inv_out::call(A, out);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d79393b23964c8a0760ef11d5bfcc9760f7dc52d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
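+
+// A minimal usage sketch for at::linalg_inv as declared above (illustrative
+// comment only, not generated code; assumes <ATen/ATen.h>):
+//
+//   at::Tensor A = at::randn({3, 3});
+//   at::Tensor Ainv = at::linalg_inv(A);   // throws if A is singular
+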
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_inv(const at::Tensor & A);
+TORCH_API at::Tensor & linalg_inv_out(at::Tensor & out, const at::Tensor & A);
+TORCH_API at::Tensor & linalg_inv_outf(const at::Tensor & A, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex.h
new file mode 100644
index 0000000000000000000000000000000000000000..faf558c1ae17caf303f508efc9fa48eb783b94d7
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
+inline ::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex(const at::Tensor & A, bool check_errors=false) {
+    return at::_ops::linalg_inv_ex::call(A, check_errors);
+}
+
+// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false) {
+    return at::_ops::linalg_inv_ex_inverse::call(A, check_errors, inverse, info);
+}
+// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
+    return at::_ops::linalg_inv_ex_inverse::call(A, check_errors, inverse, info);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..900e0a95b87735f1c4b12a382c53b22552aaeb80
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
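+
+// A minimal usage sketch for at::linalg_inv_ex as declared above
+// (illustrative comment only, not generated code; assumes <ATen/ATen.h> and C++17):
+//
+//   at::Tensor A = at::randn({3, 3});
+//   // Unlike at::linalg_inv, this does not throw on singular input when
+//   // check_errors is false; the LAPACK-style info tensor is nonzero instead.
+//   auto [inverse, info] = at::linalg_inv_ex(A, /*check_errors=*/false);
+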
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex(const at::Tensor & A, bool check_errors=false);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3786489571eb078c78ffa1ffd9c8a532f024e0f1
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex(const at::Tensor & A, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca902f8f5b869ea54fc1e7a13324af3b4d779f7a
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex(const at::Tensor & A, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..01e67c5cf903b1f5c13fcc59fe2bcadba2b70d08
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_inv_ex : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & A, bool check_errors);
+};
+
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c77e5128b6e93f11aed1874a699ded6b6198bfb5
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex(const at::Tensor & A, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e296726ad92ca0571689f47687e0ef8cfeb2d595
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/linalg_inv_ex_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_inv_ex_out : public at::meta::structured_linalg_inv_ex {
+void impl(const at::Tensor & A, bool check_errors, const at::Tensor & inverse, const at::Tensor & info);
+};
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..933dcbb1aa50986962f493f00052c77fb8a5abdc
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_inv_ex {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_inv_ex";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & A, bool check_errors);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors);
+};
+
+struct TORCH_API linalg_inv_ex_inverse {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_inv_ex";
+  static constexpr const char* overload_name = "inverse";
+  static constexpr const char* schema_str = "linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)";
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2fd17d5716e4cc1503502d84eb81df8d701a64bd
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_inv(const at::Tensor & A);
+TORCH_API at::Tensor & linalg_inv_out(const at::Tensor & A, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9b261c6fd64c4122065d4ae557773c93d5fbaf16
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_inv {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_inv";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_inv(Tensor A) -> Tensor";
+  static at::Tensor call(const at::Tensor & A);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A);
+};
+
+struct TORCH_API linalg_inv_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_inv";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & A, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor.h
new file mode 100644
index 0000000000000000000000000000000000000000..50336907216acecaf6945b56cb27f2df7c016207
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <optional>
+
+
+
+#include <ATen/ops/linalg_ldl_factor_ops.h>
+
+namespace at {
+
+
+// aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
+inline ::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor(const at::Tensor & self, bool hermitian=false) {
+    return at::_ops::linalg_ldl_factor::call(self, hermitian);
+}
+
+// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out(at::Tensor & LD, at::Tensor & pivots, const at::Tensor & self, bool hermitian=false) {
+    return at::_ops::linalg_ldl_factor_out::call(self, hermitian, LD, pivots);
+}
+// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_outf(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {
+    return at::_ops::linalg_ldl_factor_out::call(self, hermitian, LD, pivots);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1141aca785fdcec5456a177fe8ce9e5fafabce2b
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor(const at::Tensor & self, bool hermitian=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out(at::Tensor & LD, at::Tensor & pivots, const at::Tensor & self, bool hermitian=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_outf(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex.h
new file mode 100644
index 0000000000000000000000000000000000000000..5fffd7c68b749b88adae72d37d27b93b5470c70b
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <optional>
+
+
+
+#include <ATen/ops/linalg_ldl_factor_ex_ops.h>
+
+namespace at {
+
+
+// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false) {
+    return at::_ops::linalg_ldl_factor_ex::call(self, hermitian, check_errors);
+}
+
+// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false) {
+    return at::_ops::linalg_ldl_factor_ex_out::call(self, hermitian, check_errors, LD, pivots, info);
+}
+// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {
+    return at::_ops::linalg_ldl_factor_ex_out::call(self, hermitian, check_errors, LD, pivots, info);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba81a25fbc914fa1837ccfa424aa4be48f54258d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..235e90e6557f8016e59481989862b927767639bb
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3be116ba230f4f3b26f27741a436b5a163d6c03d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..22e2814006804296d7729db7c75fd9ebd505529f
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_ldl_factor_ex : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & self, bool hermitian, bool check_errors);
+};
+
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..253e3d0b5039420d2d7905110cc48f9736b037e2
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..9b74f9aec1da4fc6a5d5060dee1704b5bea050de
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/linalg_ldl_factor_ex_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_ldl_factor_ex_out : public at::meta::structured_linalg_ldl_factor_ex {
+void impl(const at::Tensor & self, bool hermitian, bool check_errors, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & info);
+};
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..526efe991fb6ebfbe1ccd22eec6f5030f1d15ff6
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_ldl_factor_ex {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_ldl_factor_ex";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)";
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, bool hermitian, bool check_errors);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors);
+};
+
+struct TORCH_API linalg_ldl_factor_ex_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_ldl_factor_ex";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)";
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9d4c00ddfc2bab89c3bd0caf0b727f9857034e7
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor(const at::Tensor & self, bool hermitian=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b49abcc6ec15bbfd3c7f9bb586b40b9f4da1bde
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_ldl_factor {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_ldl_factor";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, bool hermitian);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian);
+};
+
+struct TORCH_API linalg_ldl_factor_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_ldl_factor";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)";
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve.h
new file mode 100644
index 0000000000000000000000000000000000000000..4771458ab637c14c03aebd47d398f42262085e27
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <optional>
+
+
+
+#include <ATen/ops/linalg_ldl_solve_ops.h>
+
+namespace at {
+
+
+// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
+inline at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) {
+    return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian);
+}
+
+// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) {
+    return at::_ops::linalg_ldl_solve_out::call(LD, pivots, B, hermitian, out);
+}
+// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {
+    return at::_ops::linalg_ldl_solve_out::call(LD, pivots, B, hermitian, out);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d46caca8c21fed8a90c5c3664974d3f1b010e7c4
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a85c98e3663dca80dbb0a09dda38f1210359b424
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false);
+TORCH_API at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false);
+TORCH_API at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..32fbb5a1a10123e494665d5264630859c464c450
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false);
+TORCH_API at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false);
+TORCH_API at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..e23acb73141ee0abd89992113b963c158808394d
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_ldl_solve : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian);
+};
+
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..eccb9154c1ecc07d439ef5f66d6e1fbc511c6631
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false);
+TORCH_API at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false);
+TORCH_API at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..06ee5ac8644d6aa909a6f4b8e22547c5bbdddc1e
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/linalg_ldl_solve_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_ldl_solve_out : public at::meta::structured_linalg_ldl_solve {
+void impl(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e9411930b7775c2175486bb940bc1d62f7c3c22
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_ldl_solve {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_ldl_solve";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian);
+};
+
+struct TORCH_API linalg_ldl_solve_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_ldl_solve";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq.h
new file mode 100644
index 0000000000000000000000000000000000000000..b35e0560786af25efa7fcfc365468eb2b153d0cd
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <optional>
+
+
+
+#include <ATen/ops/linalg_lstsq_ops.h>
+
+namespace at {
+
+
+// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond=::std::nullopt, ::std::optional<c10::string_view> driver=::std::nullopt) {
+    return at::_ops::linalg_lstsq::call(self, b, rcond, driver);
+}
+
+// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond=::std::nullopt, ::std::optional<c10::string_view> driver=::std::nullopt) {
+    return at::_ops::linalg_lstsq_out::call(self, b, rcond, driver, solution, residuals, rank, singular_values);
+}
+// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
+    return at::_ops::linalg_lstsq_out::call(self, b, rcond, driver, solution, residuals, rank, singular_values);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..09f641bb846a6405caa5c2d613072b391fd0b60e
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond=::std::nullopt, ::std::optional<c10::string_view> driver=::std::nullopt);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8f69935ccbc8adb4c4d041e58b01924a414c1873
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond=::std::nullopt, ::std::optional<c10::string_view> driver=::std::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f77d69a77c87fb062678b16e81020eeda421b72
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond=::std::nullopt, ::std::optional<c10::string_view> driver=::std::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7c64f79c4d66548cc104a3f38619c7d495445c0c
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond=::std::nullopt, ::std::optional<c10::string_view> driver=::std::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5a93cfcc85af2a9c74a89bed9371fecd6f16c62e
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+#include <optional>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_lstsq {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, ::std::optional<double>, ::std::optional<c10::string_view>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lstsq";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)";
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver);
+};
+
+struct TORCH_API linalg_lstsq_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, ::std::optional<double>, ::std::optional<c10::string_view>, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lstsq";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)";
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu.h
new file mode 100644
index 0000000000000000000000000000000000000000..3e66a6a1f7cab9b65f85aac96e544eb5f04176ce
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <optional>
+
+
+
+#include <ATen/ops/linalg_lu_ops.h>
+
+namespace at {
+
+
+// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true) {
+    return at::_ops::linalg_lu::call(A, pivot);
+}
+
+// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true) {
+    return at::_ops::linalg_lu_out::call(A, pivot, P, L, U);
+}
+// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
+    return at::_ops::linalg_lu_out::call(A, pivot, P, L, U);
+}
+
+}
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2dee1ae58048f3e8cb66caf33904b2bc5d46e516
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..de763562b8b6bfd52bf2d2fa5b19eb4efe03ff75
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..64a0949f1f4ecee5f38aef449204a44c50ee785b
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b4ddfb56427e7b856f1abbd2d04eeeb683b3de8
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
+inline ::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor(const at::Tensor & A, bool pivot=true) {
+    return at::_ops::linalg_lu_factor::call(A, pivot);
+}
+
+// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out(at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot=true) {
+    return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots);
+}
+// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
+inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_outf(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
+    return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots);
+}
+
+}
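For comparison with linalg_lu above, linalg_lu_factor packs L and U into a single tensor plus LAPACK-style pivots; a brief sketch under the same assumptions:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor A = at::randn({4, 4});
      // LU stores L (below the diagonal, with an implicit unit diagonal) and U
      // (on/above it); pivots uses 1-based LAPACK indexing.
      auto [LU, pivots] = at::linalg_lu_factor(A, /*pivot=*/true);
      return 0;
    }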
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e06853c7cd10ba29e532f96be0e71d2765c4d6e7
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor(const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out(at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_outf(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex.h
new file mode 100644
index 0000000000000000000000000000000000000000..88587f438a46e761cbec0beacc79e939e9a062c1
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false) {
+    return at::_ops::linalg_lu_factor_ex::call(A, pivot, check_errors);
+}
+
+// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false) {
+    return at::_ops::linalg_lu_factor_ex_out::call(A, pivot, check_errors, LU, pivots, info);
+}
+// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
+    return at::_ops::linalg_lu_factor_ex_out::call(A, pivot, check_errors, LU, pivots, info);
+}
+
+}
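The _ex variant exposes the LAPACK info code instead of throwing; a hedged sketch (editor's illustration, same assumptions as above) of how the check_errors flag is typically used:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor A = at::zeros({3, 3});  // deliberately singular
      // With check_errors=false, factorization failure is reported via info
      // (info > 0 marks the first exactly-zero pivot) rather than by throwing.
      auto [LU, pivots, info] = at::linalg_lu_factor_ex(A, /*pivot=*/true, /*check_errors=*/false);
      bool singular = info.item<int>() > 0;
      (void)singular;
      return 0;
    }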
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fb39fd7369f28c7dc6608c99d58114572245c83c
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8089cc19412242c8c06e88b1ef36d425cf2c136c
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f1386c061174e068cc35eb79d540532a9d4c6c6e
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f399f383f1ef908bb77b70968a46270fef9567f
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_lu_factor_ex : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & A, bool pivot, bool check_errors);
+};
+
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..35164c499520db75db623591e1af6cf3027b56f5
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..06a14b24e644ceea3490a402ece19afeb849de0a
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_lu_factor_ex_out : public at::meta::structured_linalg_lu_factor_ex {
+void impl(const at::Tensor & A, bool pivot, bool check_errors, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & info);
+};
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..060020296357f532b568421954f09855efb72b3e
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_lu_factor_ex {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lu_factor_ex";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)";
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & A, bool pivot, bool check_errors);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors);
+};
+
+struct TORCH_API linalg_lu_factor_ex_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lu_factor_ex";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)";
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3acde81310e51c82e7e75950a74b31f326a234f9
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor(const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots);
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c6211cbb9e952bcf001193cad16af6e82eeb3e1
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_lu_factor {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lu_factor";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & A, bool pivot);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot);
+};
+
+struct TORCH_API linalg_lu_factor_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lu_factor";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)";
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..818caa082ce1bbde2a933e95f4ad0d0f0d21ac06
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_lu : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & A, bool pivot);
+};
+
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1728013043da66604f808b08dc921f5ca5afc218
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d3066dba9409d29f7ff0b8de4b34106db368173e
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_lu_out : public at::meta::structured_linalg_lu {
+void impl(const at::Tensor & A, bool pivot, const at::Tensor & P, const at::Tensor & L, const at::Tensor & U);
+};
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c5a72cc2c81b27fa13a3126351b020950af6695f
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_lu {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lu";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)";
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & A, bool pivot);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot);
+};
+
+struct TORCH_API linalg_lu_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, bool, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lu";
+  static constexpr const char* overload_name = "out";
U)"; + static ::std::tuple call(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve.h new file mode 100644 index 0000000000000000000000000000000000000000..1b60e3e85a2f9dd57ecae8ff384f5575bd1a2d9f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor +inline at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) { + return at::_ops::linalg_lu_solve::call(LU, pivots, B, left, adjoint); +} + +// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_lu_solve_out(at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) { + return at::_ops::linalg_lu_solve_out::call(LU, pivots, B, left, adjoint, out); +} +// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_lu_solve_outf(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) { + return at::_ops::linalg_lu_solve_out::call(LU, pivots, B, left, adjoint, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..81a0e5a951154c26590fa7b1ace0610f6d9f1462 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..81a0e5a951154c26590fa7b1ace0610f6d9f1462
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..deecd795b72a7db02f83fcf879633760898d3b99
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false);
+TORCH_API at::Tensor & linalg_lu_solve_out(at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false);
+TORCH_API at::Tensor & linalg_lu_solve_outf(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7527f49e800f4d06551c206df56f2b32deaaa8c7
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false);
+TORCH_API at::Tensor & linalg_lu_solve_out(at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false);
+TORCH_API at::Tensor & linalg_lu_solve_outf(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..03b47a9578f3e6fec8ffa3f039f8e86cc6317a67
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_lu_solve : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint);
+};
+
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..df858127f7fd63ad94be2cafdccd37464cf1af1f
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false);
+TORCH_API at::Tensor & linalg_lu_solve_out(at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false);
+TORCH_API at::Tensor & linalg_lu_solve_outf(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8077f217e0a9c4817d3f4396c1477ebf8970f93b
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_lu_solve_out : public at::meta::structured_linalg_lu_solve {
+void impl(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b612a58cedcca0b9d3cf202e0b74ad882ecd8d9
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_ops.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_lu_solve {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lu_solve";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor";
+  static at::Tensor call(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint);
+};
+
+struct TORCH_API linalg_lu_solve_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::linalg_lu_solve";
+  static constexpr const char* overload_name = "out";
+  static constexpr const char* schema_str = "linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)";
+  static at::Tensor & call(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul.h
new file mode 100644
index 0000000000000000000000000000000000000000..b4801c8d653ff89d6ac862231948c0e346c351d4
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor
+inline at::Tensor linalg_matmul(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::linalg_matmul::call(self, other);
+}
+
+// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::linalg_matmul_out::call(self, other, out);
+}
+// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::linalg_matmul_out::call(self, other, out);
+}
+
+}
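linalg_matmul is an alias of at::matmul; a minimal sketch (editor's illustration, same assumptions) contrasting the functional and out= forms declared above:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor a = at::randn({2, 3});
      at::Tensor b = at::randn({3, 4});
      at::Tensor c = at::linalg_matmul(a, b);  // same result as at::matmul(a, b)
      at::Tensor out = at::empty({2, 4});
      at::linalg_matmul_out(out, a, b);        // writes the product into out
      return 0;
    }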
+inline at::Tensor & linalg_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::linalg_matmul_out::call(self, other, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8ef384282b37fc358c66b289825fa229a0133d2d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_matmul(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & linalg_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & linalg_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0e719a6b358d6012356727f8ac4279e6ba09c3a5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_matmul(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & linalg_matmul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..307165671c1bcf16812994c79b9c0592c90a2838 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matmul_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp.h
new file mode 100644
index 0000000000000000000000000000000000000000..907b0732e298492f762ad10fe545c093f1ea7209
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_matrix_exp(Tensor self) -> Tensor
+inline at::Tensor linalg_matrix_exp(const at::Tensor & self) {
+    return at::_ops::linalg_matrix_exp::call(self);
+}
+
+// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_exp_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::linalg_matrix_exp_out::call(self, out);
+}
+// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_exp_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::linalg_matrix_exp_out::call(self, out);
+}
+
+}
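A one-case sketch for linalg_matrix_exp (editor's illustration; exp of the zero matrix equals the identity, which makes a convenient sanity check):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor Z = at::zeros({3, 3});
      at::Tensor E = at::linalg_matrix_exp(Z);  // equals at::eye(3)
      return 0;
    }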
+inline at::Tensor & linalg_matrix_exp_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::linalg_matrix_exp_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..54db70375d05c0dc94bbc1fc501bbde3c1df2045 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_matrix_exp(const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..385ff77c57b92f1b016c35a9a636f3a19b3cf35b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::linalg_matrix_norm::call(self, ord, dim, keepdim, dtype); +} + +// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::linalg_matrix_norm_out::call(self, ord, dim, keepdim, dtype, out); +} +// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out) { + return at::_ops::linalg_matrix_norm_out::call(self, ord, dim, keepdim, dtype, out); +} + +// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? 
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm.h
new file mode 100644
index 0000000000000000000000000000000000000000..385ff77c57b92f1b016c35a9a636f3a19b3cf35b
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm.h
@@ -0,0 +1,54 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::linalg_matrix_norm::call(self, ord, dim, keepdim, dtype);
+}
+
+// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::linalg_matrix_norm_out::call(self, ord, dim, keepdim, dtype, out);
+}
+// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::linalg_matrix_norm_out::call(self, ord, dim, keepdim, dtype, out);
+}
+
+// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor linalg_matrix_norm(const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::linalg_matrix_norm_str_ord::call(self, ord, dim, keepdim, dtype);
+}
+
+// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::linalg_matrix_norm_str_ord_out::call(self, ord, dim, keepdim, dtype, out);
+}
+// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::linalg_matrix_norm_str_ord_out::call(self, ord, dim, keepdim, dtype, out);
+}
+
+}
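A sketch (editor's illustration, same assumptions) distinguishing the two overloads declared above: the Scalar ord form, e.g. 2 for the spectral norm, and the string form, e.g. "fro" for the Frobenius norm:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor M = at::randn({5, 5});
      at::Tensor spectral = at::linalg_matrix_norm(M, 2);       // Scalar ord overload
      at::Tensor frobenius = at::linalg_matrix_norm(M, "fro");  // str_ord overload
      return 0;
    }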
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2339e1dd3024c89cd8552a03319e65a57cf26883
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor linalg_matrix_norm(const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5aff85b0b3ae90a9921569d298a4a182ce9b8cda
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_norm_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_out(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor linalg_matrix_norm(const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & linalg_matrix_norm_out(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+} // namespace native
+} // namespace at
c10::string_view mode="reduced") { + return at::_ops::linalg_qr::call(A, mode); +} + +// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) +inline ::std::tuple linalg_qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & A, c10::string_view mode="reduced") { + return at::_ops::linalg_qr_out::call(A, mode, Q, R); +} +// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) +inline ::std::tuple linalg_qr_outf(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) { + return at::_ops::linalg_qr_out::call(A, mode, Q, R); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linspace_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linspace_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..db410916ccbaeed2ba64dfa188552ace59a35bd9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linspace_compositeexplicitautograd_dispatch.h @@ -0,0 +1,36 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}); +TORCH_API at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}); +TORCH_API at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & linspace_out(at::Tensor & out, const at::Tensor & start, const at::Tensor & end, int64_t steps); +TORCH_API at::Tensor & linspace_outf(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out); +TORCH_API at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}); +TORCH_API at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & linspace_out(at::Tensor & out, const at::Tensor & start, const at::Scalar & end, int64_t steps); +TORCH_API at::Tensor & linspace_outf(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out); +TORCH_API at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}); +TORCH_API at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional dtype, ::std::optional layout, 
::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Tensor & end, int64_t steps); +TORCH_API at::Tensor & linspace_outf(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at
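// ---- Editor's usage sketch (not part of the generated header; assumes only
// ---- the public ATen API declared above) ---------------------------------
// The generated `_out`/`_outf` pair differ only in argument order: `_out`
// leads with the out tensor, `_outf` trails with it, matching the schema.
#include <ATen/ATen.h>

void linspace_example() {
  // Scalar endpoints, dtype selected through TensorOptions.
  at::Tensor a = at::linspace(0.0, 1.0, /*steps=*/5, at::TensorOptions().dtype(at::kFloat));
  // Tensor endpoints (the Tensor_Tensor overload).
  at::Tensor start = at::scalar_tensor(0.0);
  at::Tensor end = at::scalar_tensor(1.0);
  at::Tensor out = at::empty({0});
  at::linspace_out(out, start, end, /*steps=*/5);   // out tensor first
  at::linspace_outf(start, end, /*steps=*/5, out);  // out tensor last
}
// ---------------------------------------------------------------------------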
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linspace_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linspace_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..08f9bf3ef0d41ac69d320a0068847915288a6ee9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/linspace_ops.h @@ -0,0 +1,106 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linspace { + using schema = at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Scalar & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API linspace_Tensor_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Tensor_Tensor"; + static constexpr const char* schema_str = "linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API linspace_Tensor_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Tensor_Scalar"; + static constexpr const char* schema_str = "linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API linspace_Scalar_Tensor { + using schema = at::Tensor (const at::Scalar &, const at::Tensor &, int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Scalar_Tensor"; + static constexpr const char* schema_str = "linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +}; + +struct TORCH_API linspace_out { + using schema = at::Tensor & (const at::Scalar &, const at::Scalar &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!)
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out); +}; + +struct TORCH_API linspace_Tensor_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Tensor_Tensor_out"; + static constexpr const char* schema_str = "linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out); +}; + +struct TORCH_API linspace_Tensor_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Tensor_Scalar_out"; + static constexpr const char* schema_str = "linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out); +}; + +struct TORCH_API linspace_Scalar_Tensor_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::linspace"; + static constexpr const char* overload_name = "Scalar_Tensor_out"; + static constexpr const char* schema_str = "linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log.h new file mode 100644 index 0000000000000000000000000000000000000000..cc598e539e07567faff7e5a857fb409c75e7f647 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::log(Tensor self) -> Tensor +inline at::Tensor log(const at::Tensor & self) { + return at::_ops::log::call(self); +} + +// aten::log_(Tensor(a!) self) -> Tensor(a!) 
+inline at::Tensor & log_(at::Tensor & self) { + return at::_ops::log_::call(self); +} + +// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::log_out::call(self, out); +} +// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::log_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10.h new file mode 100644 index 0000000000000000000000000000000000000000..605af8f225e9a6c9ca7081f1122d85f478cfa708 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::log10(Tensor self) -> Tensor +inline at::Tensor log10(const at::Tensor & self) { + return at::_ops::log10::call(self); +} + +// aten::log10_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & log10_(at::Tensor & self) { + return at::_ops::log10_::call(self); +} + +// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log10_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::log10_out::call(self, out); +} +// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log10_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::log10_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..763720127af3a54859a2d86422f62f285fcd7609 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_log10 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..da65f15fad8bd854b6090ac4cb704d6367a1674d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor log10(const at::Tensor & self); +TORCH_API at::Tensor & log10_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log10_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log10_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_native.h new file mode 100644 index 0000000000000000000000000000000000000000..89ef4bd62fcbee1941fec80d71c466b66b2c4f26 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_log10_out : public at::meta::structured_log10 { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1ce7125dd2c3d2bad9466c9b230e2b593842e258 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log10_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log10 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log10"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log10(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API log10_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log10_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log10_(Tensor(a!) 
self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API log10_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log10"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p.h new file mode 100644 index 0000000000000000000000000000000000000000..79b7f5722b9b474677e39af21d50a9a8226a4c70 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::log1p(Tensor self) -> Tensor +inline at::Tensor log1p(const at::Tensor & self) { + return at::_ops::log1p::call(self); +} + +// aten::log1p_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & log1p_(at::Tensor & self) { + return at::_ops::log1p_::call(self); +} + +// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::log1p_out::call(self, out); +} +// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::log1p_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..faa5515cd50444f2c47a711dfb55796886bc85ee --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor log1p(const at::Tensor & self); +TORCH_API at::Tensor & log1p_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f5fb20b93cbb5b4ddc23a35e7d7a0dae3374865b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor log1p(const at::Tensor & self); +TORCH_API at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log1p_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b3fb745a0955d7784a3f6df8ccb51d91355c627b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor log1p(const at::Tensor & self); +TORCH_API at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log1p_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..c6e8da68ac6ed40dbc83ee9fd3a7979804dc03a2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_log1p : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8f89b0a6447d9db4dda2150924abe066f5d073dd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor log1p(const at::Tensor & self); +TORCH_API at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log1p_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7feb7c6aba383260978f5140c866d48e35e4c917 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_log1p_out : public at::meta::structured_log1p { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor log1p_sparse(const at::Tensor & self); +TORCH_API at::Tensor & log1p_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log1p_sparse_(at::Tensor & self); +TORCH_API at::Tensor log1p_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & log1p_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log1p_sparse_csr_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b05aaa10eb24cd19f4467d376705e7b90ddd9287 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log1p { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log1p"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log1p(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API log1p_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log1p_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log1p_(Tensor(a!) 
self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API log1p_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log1p"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2.h new file mode 100644 index 0000000000000000000000000000000000000000..4b41d7c3ecc74f46fbcd2e843a779da04ed9ce81 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::log2(Tensor self) -> Tensor +inline at::Tensor log2(const at::Tensor & self) { + return at::_ops::log2::call(self); +} + +// aten::log2_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & log2_(at::Tensor & self) { + return at::_ops::log2_::call(self); +} + +// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log2_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::log2_out::call(self, out); +} +// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log2_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::log2_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0c3163a4ff9fd5eb0c470e199be0f7aec2fb4f50 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor log2(const at::Tensor & self); +TORCH_API at::Tensor & log2_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f885a233fbed2c46b8d73c615b32dc327be5cae5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor log2(const at::Tensor & self); +TORCH_API at::Tensor & log2_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log2_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log2_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7ecf57adf7a6ee88ce14dbb2909a5b6a4d2db8a8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor log2(const at::Tensor & self); +TORCH_API at::Tensor & log2_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log2_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log2_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..d01e94bde57182091462f00093633b891e355202 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_log2 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta
} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3120a00b761aef2778b4c42af0cb600dcf1fd588 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor log2(const at::Tensor & self); +TORCH_API at::Tensor & log2_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log2_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log2_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_native.h new file mode 100644 index 0000000000000000000000000000000000000000..dac676d43968b65c609c16d2fcbebef57789cd1a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_log2_out : public at::meta::structured_log2 { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..eda566632516fa7a6cbd2dc50bdcfea7c40ffe70 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log2 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log2"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log2(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API log2_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log2_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log2_(Tensor(a!) self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API log2_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log2"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "log2.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1bc9b581a0a6d19e3523246a6a11aaa72bdeb416 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor log(const at::Tensor & self); +TORCH_API at::Tensor & log_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e6895456452f63e0a29d93c384a7ed6dff5e0b25 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor log(const at::Tensor & self); +TORCH_API at::Tensor & log_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bb0330b29f83c29c60e350fcbcbc14550df96c0f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor log(const at::Tensor & self); +TORCH_API at::Tensor & log_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..14c3a57947f6f1251e10d53c9d8c04165a55dd4f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_log : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta
} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a10faffd3437dcb3625c07aec7289e30fdd34a06 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor log(const at::Tensor & self); +TORCH_API at::Tensor & log_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log_(at::Tensor & self); + +} // namespace meta +} // namespace at
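// ---- Editor's usage sketch (not part of the generated headers; assumes the
// ---- standard meta-device behaviour of structured ops) --------------------
// The meta:: declarations above back shape/dtype inference: called on a
// "meta" tensor (metadata only, no storage), at::log returns a meta tensor
// with the inferred output metadata and touches no data.
#include <ATen/ATen.h>

void log_meta_example() {
  at::Tensor t = at::empty({2, 3}, at::TensorOptions().device(at::kMeta));
  at::Tensor r = at::log(t);  // dispatches to the Meta kernel
  // r.sizes() is {2, 3} and r.device().is_meta() is true.
}
// ---------------------------------------------------------------------------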
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_native.h new file mode 100644 index 0000000000000000000000000000000000000000..483c0c74d39251991092bdcf5b45ebdb6d523e91 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_log_out : public at::meta::structured_log { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal.h new file mode 100644 index 0000000000000000000000000000000000000000..f0f61d46b447ee82870b9b31a9278750e06b25d4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_normal_out(at::Tensor & out, const at::Tensor & self, double mean=1, double std=2, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::log_normal_out::call(self, mean, std, generator, out); +} +// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_normal_outf(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::log_normal_out::call(self, mean, std, generator, out); +} + +// aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor +inline at::Tensor log_normal(const at::Tensor & self, double mean=1, double std=2, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::log_normal::call(self, mean, std, generator); +} + +}
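// ---- Editor's usage sketch (not part of the generated header; assumes only
// ---- the public ATen API declared above) ---------------------------------
// log_normal comes in the three generated flavours: functional, out-variant,
// and the in-place Tensor method.
#include <ATen/ATen.h>

void log_normal_example() {
  at::Tensor x = at::empty({3});
  at::Tensor y = at::log_normal(x, /*mean=*/0.0, /*std=*/1.0);  // functional
  at::Tensor out = at::empty({3});
  at::log_normal_out(out, x, /*mean=*/0.0, /*std=*/1.0);        // out variant
  x.log_normal_(/*mean=*/0.0, /*std=*/1.0);                     // in-place
}
// ---------------------------------------------------------------------------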
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ca39f3b89e2de25a1855a3861029357560eba2e1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor log_normal(const at::Tensor & self, double mean=1, double std=2, ::std::optional<at::Generator> generator=::std::nullopt); +TORCH_API at::Tensor & log_normal_out(at::Tensor & out, const at::Tensor & self, double mean=1, double std=2, ::std::optional<at::Generator> generator=::std::nullopt); +TORCH_API at::Tensor & log_normal_outf(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7d0f5c856c197d616334cc7807636ce9ab3fade1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & log_normal_(at::Tensor & self, double mean=1, double std=2, ::std::optional<at::Generator> generator=::std::nullopt); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d15f1e7740b833be8d51e15b60e919485a1389a6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & log_normal_(at::Tensor & self, double mean=1, double std=2, ::std::optional<at::Generator> generator=::std::nullopt); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cbf5c2c6369303eeb15f610c06e3969b4c9a70e2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & log_normal_(at::Tensor & self, double mean=1, double std=2, ::std::optional<at::Generator> generator=::std::nullopt); + +} // namespace meta +} // namespace at
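// ---- Editor's usage sketch (assumes at::detail::createCPUGenerator from
// ---- ATen/CPUGeneratorImpl.h; not part of the generated headers) ----------
// The ::std::optional<at::Generator> parameter lets callers pin the RNG state
// for reproducible sampling.
#include <ATen/ATen.h>
#include <ATen/CPUGeneratorImpl.h>

void log_normal_seeded_example() {
  at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
  at::Tensor x = at::empty({3});
  x.log_normal_(/*mean=*/0.0, /*std=*/1.0, gen);  // same seed, same draws
}
// ---------------------------------------------------------------------------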
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ebc939f68299dd79d6021b961cef29b21d1de22a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor log_normal(const at::Tensor & self, double mean=1, double std=2, ::std::optional<at::Generator> generator=::std::nullopt); +TORCH_API at::Tensor & log_normal_out(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out); +TORCH_API at::Tensor & log_normal_(at::Tensor & self, double mean=1, double std=2, ::std::optional<at::Generator> generator=::std::nullopt); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b0c118c3d0f6c2b2549ca9d0d7bd98901ef92d00 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log_normal_ { + using schema = at::Tensor & (at::Tensor &, double, double, ::std::optional<at::Generator>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_normal_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator); +}; + +struct TORCH_API log_normal_out { + using schema = at::Tensor & (const at::Tensor &, double, double, ::std::optional<at::Generator>, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_normal"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!)
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out); +}; + +struct TORCH_API log_normal { + using schema = at::Tensor (const at::Tensor &, double, double, ::std::optional<at::Generator>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_normal"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a9be7695ad308e17288141bf69ccaa4098752239 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API log_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log_(Tensor(a!) self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API log_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "log.out(Tensor self, *, Tensor(a!)
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid.h new file mode 100644 index 0000000000000000000000000000000000000000..181fbaae7b9da4cb51bcde2bf502700ecc5de6d7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_sigmoid_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::log_sigmoid_out::call(self, out); +} +// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_sigmoid_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::log_sigmoid_out::call(self, out); +} + +// aten::log_sigmoid(Tensor self) -> Tensor +inline at::Tensor log_sigmoid(const at::Tensor & self) { + return at::_ops::log_sigmoid::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..18c71a2a3e042d0bfa3752daf2ab9e4de01328c4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) { + return at::_ops::log_sigmoid_backward_grad_input::call(grad_output, self, buffer, grad_input); +} +// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) 
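// Illustrative usage sketch for the at::log_sigmoid entry points declared
// above -- a standalone example, not part of the generated header (assumes
// #include <ATen/ATen.h> at file scope in a standard libtorch build):
inline void log_sigmoid_usage_sketch() {
  at::Tensor x = at::randn({4});
  at::Tensor y = at::log_sigmoid(x);   // functional form: log(1 / (1 + exp(-x)))
  at::Tensor out = at::empty_like(x);
  at::log_sigmoid_out(out, x);         // out= form, writes into `out`
}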
+inline at::Tensor & log_sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) { + return at::_ops::log_sigmoid_backward_grad_input::call(grad_output, self, buffer, grad_input); +} + +// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor +inline at::Tensor log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) { + return at::_ops::log_sigmoid_backward::call(grad_output, self, buffer); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d7980698e0b0e4f1752bdc70ea8cd26789827aa7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bdafa00523d41ce975fbebbf3a37bd5b9fa7c337 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
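// The paired `*_out` / `*_outf` declarations here follow a general torchgen
// convention: `_out` takes the destination tensor first, `_outf` takes it
// last, and both resolve to the same registered kernel. A minimal sketch
// (assumes #include <ATen/ATen.h> at file scope; not part of the header):
inline void out_vs_outf_sketch(const at::Tensor& grad, const at::Tensor& x,
                               const at::Tensor& buffer) {
  at::Tensor grad_input = at::empty_like(x);
  at::log_sigmoid_backward_out(grad_input, grad, x, buffer);   // out-first variant
  at::log_sigmoid_backward_outf(grad, x, buffer, grad_input);  // out-last variant
}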
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1c1bd785e86db3c9b9a787f8fff7ef0c1ed168b9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor log_sigmoid_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_cpu_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); +TORCH_API at::Tensor log_sigmoid_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API at::Tensor & log_sigmoid_backward_cuda_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b4d1018c3696e9e35c698e5338f1818f9fb1e9dd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log_sigmoid_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) 
grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); +}; + +struct TORCH_API log_sigmoid_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..eb562a37cf3c1987a31471370a9b3318b384ddf7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor log_sigmoid(const at::Tensor & self); +TORCH_API at::Tensor & log_sigmoid_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log_sigmoid_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward.h new file mode 100644 index 0000000000000000000000000000000000000000..1cc80d18d0d2afba44669d55af365323355764a7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) 
buffer) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self) { + return at::_ops::log_sigmoid_forward_output::call(self, output, buffer); +} +// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple log_sigmoid_forward_outf(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) { + return at::_ops::log_sigmoid_forward_output::call(self, output, buffer); +} + +// aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer) +inline ::std::tuple log_sigmoid_forward(const at::Tensor & self) { + return at::_ops::log_sigmoid_forward::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e091a8b009506eedddc0ec194bd8dfe99422605f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple log_sigmoid_forward(const at::Tensor & self); +TORCH_API ::std::tuple log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self); +TORCH_API ::std::tuple log_sigmoid_forward_outf(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0ad2fd16e07040a6387a04f43e0a8e92001fd7fa --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
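// How the forward/backward pair declared above composes: log_sigmoid_forward
// returns (output, buffer), and that buffer is exactly what
// log_sigmoid_backward consumes. A minimal sketch (assumes
// #include <ATen/ATen.h>; not part of the generated header):
inline at::Tensor log_sigmoid_grad_sketch(const at::Tensor& x,
                                          const at::Tensor& grad_out) {
  auto [output, buffer] = at::log_sigmoid_forward(x);
  (void)output;  // only the saved buffer feeds the backward formula
  return at::log_sigmoid_backward(grad_out, x, buffer);
}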
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple log_sigmoid_forward(const at::Tensor & self); +TORCH_API ::std::tuple log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self); +TORCH_API ::std::tuple log_sigmoid_forward_outf(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..36fe5692331a07dbb52c03b07fb4a1b1e94c017b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple log_sigmoid_forward_cpu(const at::Tensor & self); +TORCH_API ::std::tuple log_sigmoid_forward_out_cpu(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +TORCH_API ::std::tuple log_sigmoid_forward_cuda(const at::Tensor & self); +TORCH_API ::std::tuple log_sigmoid_forward_out_cuda(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..34963567f7cc6b3cea3c9cdba841a4c86f9badcd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log_sigmoid_forward_output { + using schema = ::std::tuple (const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid_forward"; + static constexpr const char* overload_name = "output"; + static constexpr const char* schema_str = "log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) 
buffer) -> (Tensor(a!), Tensor(b!))"; + static ::std::tuple call(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +}; + +struct TORCH_API log_sigmoid_forward { + using schema = ::std::tuple (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid_forward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)"; + static ::std::tuple call(const at::Tensor & self); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_native.h new file mode 100644 index 0000000000000000000000000000000000000000..810a9ddd6a83c84d7790a05b1ef8732e030aafcd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor log_sigmoid(const at::Tensor & self); +TORCH_API at::Tensor & log_sigmoid_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f07b2284d0e489a7b0a8f014022e946c461ad378 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log_sigmoid_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "log_sigmoid.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API log_sigmoid { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_sigmoid"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "log_sigmoid(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax.h new file mode 100644 index 0000000000000000000000000000000000000000..94d2d65db31d4723b9b8bdcfc310baa84a6d6019 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor +inline at::Tensor log_softmax(const at::Tensor & self, int64_t dim, ::std::optional dtype=::std::nullopt) { + return at::_ops::log_softmax_int::call(self, dim, dtype); +} + +// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, ::std::optional dtype=::std::nullopt) { + return at::_ops::log_softmax_int_out::call(self, dim, dtype, out); +} +// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_softmax_outf(const at::Tensor & self, int64_t dim, ::std::optional dtype, at::Tensor & out) { + return at::_ops::log_softmax_int_out::call(self, dim, dtype, out); +} + +// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor log_softmax(const at::Tensor & self, at::Dimname dim, ::std::optional dtype=::std::nullopt) { + return at::_ops::log_softmax_Dimname::call(self, dim, dtype); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..44d6bd8aec14f36168edb779521901a40cca968b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
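// Minimal sketch of the at::log_softmax overloads declared above: the plain
// int-dim form, optional dtype promotion, and the out= form (assumes
// #include <ATen/ATen.h>; not part of the generated header):
inline void log_softmax_sketch() {
  at::Tensor logits = at::randn({2, 5});
  at::Tensor lp  = at::log_softmax(logits, /*dim=*/1);               // rows sum to 1 in exp-space
  at::Tensor lpd = at::log_softmax(logits, /*dim=*/1, at::kDouble);  // compute/return in double
  at::Tensor out = at::empty_like(lp);
  at::log_softmax_out(out, logits, /*dim=*/1);
}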
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & log_softmax_outf(const at::Tensor & self, int64_t dim, ::std::optional dtype, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..aa33b523ff758d0edb753665b636e2c523c6642b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor log_softmax(const at::Tensor & self, int64_t dim, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor log_softmax(const at::Tensor & self, at::Dimname dim, ::std::optional dtype=::std::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d2bde848e0961f97436f9ee00c98df7fe261396f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor log_softmax(const at::Tensor & self, int64_t dim, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & log_softmax_out(const at::Tensor & self, int64_t dim, ::std::optional dtype, at::Tensor & out); +TORCH_API at::Tensor log_softmax(const at::Tensor & self, at::Dimname dim, ::std::optional dtype=::std::nullopt); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..57364e65e6d584caf379a9383549cbf8d81acbb2 --- /dev/null +++ 
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log_softmax_int { + using schema = at::Tensor (const at::Tensor &, int64_t, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_softmax"; + static constexpr const char* overload_name = "int"; + static constexpr const char* schema_str = "log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, ::std::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional dtype); +}; + +struct TORCH_API log_softmax_int_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_softmax"; + static constexpr const char* overload_name = "int_out"; + static constexpr const char* schema_str = "log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, ::std::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional dtype, at::Tensor & out); +}; + +struct TORCH_API log_softmax_Dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::log_softmax"; + static constexpr const char* overload_name = "Dimname"; + static constexpr const char* schema_str = "log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, ::std::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional dtype); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp.h new file mode 100644 index 0000000000000000000000000000000000000000..7beef01b3bcdda0b4507d907032e34c64b423943 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
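// The at::_ops structs above expose, per overload, the registered name,
// overload_name, schema_str, and the call/redispatch entry points that the
// inline at:: wrappers forward to. A hedged sketch of invoking one directly
// (assumes #include <ATen/ATen.h>; equivalent to at::log_softmax(x, 0)):
inline at::Tensor log_softmax_via_ops_sketch(const at::Tensor& x) {
  return at::_ops::log_softmax_int::call(x, /*dim=*/0, ::std::nullopt);
}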
+inline at::Tensor & logaddexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logaddexp_out::call(self, other, out); +} +// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logaddexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::logaddexp_out::call(self, other, out); +} + +// aten::logaddexp(Tensor self, Tensor other) -> Tensor +inline at::Tensor logaddexp(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logaddexp::call(self, other); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2.h new file mode 100644 index 0000000000000000000000000000000000000000..d49de2d45393e0ed79b00392cf7266f98b55e356 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logaddexp2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logaddexp2_out::call(self, other, out); +} +// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logaddexp2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::logaddexp2_out::call(self, other, out); +} + +// aten::logaddexp2(Tensor self, Tensor other) -> Tensor +inline at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logaddexp2::call(self, other); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6ed6fbf8f670a48b8a8815c55ecb78e9032c71c3 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
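// Sketch of the two ops declared above: logaddexp computes
// log(exp(a) + exp(b)) and logaddexp2 computes log2(2^a + 2^b), both
// stabilized so large inputs do not overflow the intermediate exp (assumes
// #include <ATen/ATen.h>; not part of the generated header):
inline void logaddexp_sketch() {
  at::Tensor a = at::full({1}, 1000.0);
  at::Tensor b = at::full({1}, 1000.0);
  at::Tensor s  = at::logaddexp(a, b);   // ~1000.6931 (= 1000 + ln 2), no overflow
  at::Tensor s2 = at::logaddexp2(a, b);  // ~1001.0    (= 1000 + log2 2)
}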
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2f51158994d86d670d882ebeb9d52ca6bc927d92 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f73d98a9858f70f8517701303eac9776b0749857 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
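// The at::cpu:: declarations above (mirrored by at::cuda:: below) call the
// kernel registered for that single backend directly, skipping dispatcher
// features such as autograd recording. A hedged sketch (assumes
// #include <ATen/ops/logaddexp2_cpu_dispatch.h>; valid for CPU tensors only):
inline at::Tensor logaddexp2_cpu_sketch(const at::Tensor& a, const at::Tensor& b) {
  return at::cpu::logaddexp2(a, b);  // no autograd graph is recorded
}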
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..0e60159d9529a25ab1b5fdbbb684e0876493c794 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_logaddexp2 : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ff8b8d60632394a8aee5908179e552ede62f34fd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
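// structured_logaddexp2 above derives from TensorIteratorBase; its meta()
// computes output shape/dtype only, which is also what backs "meta" tensors
// (shape inference without storage). A hedged sketch (assumes
// #include <ATen/ATen.h>; illustrative, not part of the generated header):
inline void meta_shape_sketch() {
  auto opts = at::TensorOptions().device(at::kMeta);
  at::Tensor a = at::empty({4, 1}, opts);
  at::Tensor b = at::empty({1, 5}, opts);
  at::Tensor c = at::logaddexp2(a, b);  // meta tensor with broadcast sizes {4, 5}
}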
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_native.h new file mode 100644 index 0000000000000000000000000000000000000000..303f6841daeb573be3d4bdd12a9fbe60c335af9e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_logaddexp2_out : public at::meta::structured_logaddexp2 { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e6131f536aceeef25af75f65d12692669fdfaedd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API logaddexp2_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logaddexp2"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API logaddexp2 { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logaddexp2"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logaddexp2(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..389d86be5c7256139462484651ad9811c8af20d5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor logaddexp(const at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e4ce066c20fce6053ebfe50f3b47612c451d212e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor logaddexp(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a03fe4e01b22c0214234bb89a3077c92808fda56 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor logaddexp(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..52710832f0bd1554d2a51408d63426a81e891063 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_logaddexp : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..085c5c9f76013ae71668447cb7834c8a691f1832 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor logaddexp(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logaddexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..31d851206d0800735a2afb2d9c12015e476b6503 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_logaddexp_out : public at::meta::structured_logaddexp { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1025aab361665bd49aa9724efb3f4a8d3d47a323 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API logaddexp_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logaddexp"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API logaddexp { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logaddexp"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logaddexp(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp.h new file mode 100644 index 0000000000000000000000000000000000000000..4c00dbc7ad51c75723fffd4334264216ebda6764 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logcumsumexp(Tensor self, int dim) -> Tensor +inline at::Tensor logcumsumexp(const at::Tensor & self, int64_t dim) { + return at::_ops::logcumsumexp::call(self, dim); +} + +// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim) { + return at::_ops::logcumsumexp_out::call(self, dim, out); +} +// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) { + return at::_ops::logcumsumexp_out::call(self, dim, out); +} + +// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor +inline at::Tensor logcumsumexp(const at::Tensor & self, at::Dimname dim) { + return at::_ops::logcumsumexp_dimname::call(self, dim); +} + +// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim) { + return at::_ops::logcumsumexp_dimname_out::call(self, dim, out); +} +// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) 
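// logcumsumexp is the cumulative, stabilized form of log-sum-exp along one
// dimension: element i holds log(exp(x_0) + ... + exp(x_i)). A minimal
// sketch of the int-dim overload declared above (assumes
// #include <ATen/ATen.h>; the Dimname overload works analogously):
inline void logcumsumexp_sketch() {
  at::Tensor x = at::randn({3, 7});
  at::Tensor y = at::logcumsumexp(x, /*dim=*/1);  // same shape as x
}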
+inline at::Tensor & logcumsumexp_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & out) { + return at::_ops::logcumsumexp_dimname_out::call(self, dim, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6986d398400cfa66a2b6a647e0ce2b32926f65bd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor logcumsumexp(const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1f75cb31fecaa314e08a514e3f1bbc1ee258c761 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor logcumsumexp(const at::Tensor & self, at::Dimname dim); +TORCH_API at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim); +TORCH_API at::Tensor & logcumsumexp_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b17ad3fc0edc9068ca43ed9feec24dc7ad23db72 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logcumsumexp(const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & logcumsumexp_out(const at::Tensor & self, int64_t dim, at::Tensor & out); +TORCH_API at::Tensor logcumsumexp(const at::Tensor & self, at::Dimname dim); +TORCH_API at::Tensor & logcumsumexp_out(const at::Tensor & self, at::Dimname dim, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ff444df66c6fa604ff31ce26abf1ee0756ca55db --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API logcumsumexp { + using schema = at::Tensor (const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logcumsumexp"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logcumsumexp(Tensor self, int dim) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim); +}; + +struct TORCH_API logcumsumexp_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logcumsumexp"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out); +}; + +struct TORCH_API logcumsumexp_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logcumsumexp"; + static constexpr const char* overload_name = "dimname"; + static constexpr const char* schema_str = "logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim); +}; + +struct TORCH_API logcumsumexp_dimname_out { + using schema = at::Tensor & (const at::Tensor &, at::Dimname, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logcumsumexp"; + static constexpr const char* overload_name = "dimname_out"; + static constexpr const char* schema_str = "logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Dimname dim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet.h new file mode 100644 index 0000000000000000000000000000000000000000..cb0dfbbdd96f277f19a4e271f014ae76122d5cc7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logdet(Tensor self) -> Tensor +inline at::Tensor logdet(const at::Tensor & self) { + return at::_ops::logdet::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3d916fbe9dd97906ae3ead4e62cbe75691a03f34 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor logdet(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e6c13c00e0ca7c4c4c29cbaef7ab8ad198974055 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logdet(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..82e17aba1c9af844bd578f4b5222d986f93e61dc --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logdet_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API logdet { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logdet"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logdet(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and.h new file mode 100644 index 0000000000000000000000000000000000000000..86b7388c919349369539ea9a2ce961c71eead952 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logical_and(Tensor self, Tensor other) -> Tensor +inline at::Tensor logical_and(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_and::call(self, other); +} + +// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & logical_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_and_out::call(self, other, out); +} +// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logical_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::logical_and_out::call(self, other, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..68289ac992566d0667fabdd16ae19930494f2862 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor logical_and(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_and_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cbba19f9530a4d070f824315fa02bfafc8ba1123 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
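// A sketch for the logical_and family above (an illustrative addition,
// assuming libtorch). The functional form returns a kBool tensor; the
// in-place logical_and_ writes back into self and keeps self's dtype.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::tensor({1, 0, 1, 0}, at::kBool);
  at::Tensor b = at::tensor({1, 1, 0, 0}, at::kBool);
  std::cout << at::logical_and(a, b) << "\n";  // true only where both are true
  a.logical_and_(b);                           // in-place variant
  std::cout << a << "\n";
  return 0;
}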
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & logical_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..244f48cd663a39d1492af353b51a829c9c22712d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & logical_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a899a2f167d74ffb533cef846c0ec4e4ef1ba688 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logical_and(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_and_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_and_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..00faa7b7a8b9e25e7cf5391be0add09f8cc01763 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
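// Each *_ops.h header in this diff (logical_and_ops.h, whose structs follow
// below) wraps one operator overload in a struct carrying its schema string
// plus static call/redispatch entry points; the inline at:: functions are
// thin forwards to ::call. Equivalent spellings, as a sketch assuming
// libtorch (not part of the generated headers):
#include <ATen/ATen.h>
#include <cassert>

int main() {
  at::Tensor a = at::tensor({1, 0}, at::kBool);
  at::Tensor b = at::tensor({1, 1}, at::kBool);
  at::Tensor r1 = at::logical_and(a, b);              // public API
  at::Tensor r2 = at::_ops::logical_and::call(a, b);  // same op, direct call
  assert(at::equal(r1, r2));
  return 0;
}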
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API logical_and { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_and"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logical_and(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API logical_and_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_and_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API logical_and_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_and"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not.h new file mode 100644 index 0000000000000000000000000000000000000000..5cc9ce8fe606d55631c32c4d709290ee39b8e285 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logical_not(Tensor self) -> Tensor +inline at::Tensor logical_not(const at::Tensor & self) { + return at::_ops::logical_not::call(self); +} + +// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logical_not_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::logical_not_out::call(self, out); +} +// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & logical_not_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::logical_not_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4aa9f9ef76f305653c668d2f2e4cd8b5bbe6dc3f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor logical_not(const at::Tensor & self); +TORCH_API at::Tensor & logical_not_(at::Tensor & self); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..69344c6b589728d39ab5d4448e9cc27e80d1af19 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
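// A sketch for the logical_not family above (illustrative, assuming
// libtorch): functional, out-variant, and in-place forms of the same
// aten::logical_not schema.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor m = at::tensor({1, 0, 1}, at::kBool);
  at::Tensor inv = at::logical_not(m);  // functional: new kBool tensor
  at::Tensor out = at::empty_like(m);
  at::logical_not_out(out, m);          // writes into out's storage
  m.logical_not_();                     // in-place variant
  std::cout << inv << "\n" << out << "\n" << m << "\n";
  return 0;
}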
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & logical_not_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & logical_not_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e930571b7dc3a059f8834acbaa37da40558408f9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & logical_not_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & logical_not_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_native.h new file mode 100644 index 0000000000000000000000000000000000000000..aa83bd4d38bc4994b4a197e173a1e3556c3aae0c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logical_not(const at::Tensor & self); +TORCH_API at::Tensor & logical_not_(at::Tensor & self); +TORCH_API at::Tensor & logical_not_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor NestedTensor_logical_not(const at::Tensor & self); +TORCH_API at::Tensor & NestedTensor_logical_not_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e41cc77e78cbfdf13d1736cff7ebc5dfa51afa7b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_not_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API logical_not { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_not"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logical_not(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API logical_not_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_not_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logical_not_(Tensor(a!) self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API logical_not_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_not"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or.h new file mode 100644 index 0000000000000000000000000000000000000000..9331898783c34030d8cbb7d07bc72bc77029e753 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logical_or(Tensor self, Tensor other) -> Tensor +inline at::Tensor logical_or(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_or::call(self, other); +} + +// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logical_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_or_out::call(self, other, out); +} +// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & logical_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::logical_or_out::call(self, other, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8e49c95ec0dd0f30ca62807ca390f01f97b63da4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor logical_or(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_or_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..24abd8d61c504e34ffe5a93a27ce3ab59b649b52 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
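// logical_or mirrors logical_and above; this sketch (illustrative, assuming
// libtorch) highlights the dtype behaviour: non-bool inputs are read as
// "nonzero means true" and the functional result is kBool.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::tensor({0, 2, 0}, at::kInt);
  at::Tensor b = at::tensor({0, 0, 5}, at::kInt);
  at::Tensor r = at::logical_or(a, b);  // [false, true, true]
  std::cout << r << "\ndtype: " << r.dtype() << "\n";
  return 0;
}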
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & logical_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dae4f1837d7696f7fdfbeab960a29fc9958df84b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & logical_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ab9be6cc94ce1d38b593bbb2386b82ed03f8631b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logical_or(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_or_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_or_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3bf21bfe67006a3a38c0695684fb2abd19f5d3e6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API logical_or { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_or"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logical_or(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API logical_or_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_or_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API logical_or_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_or"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor.h new file mode 100644 index 0000000000000000000000000000000000000000..073488da594dc23cffa61645eccf9a5269ad2d0a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logical_xor(Tensor self, Tensor other) -> Tensor +inline at::Tensor logical_xor(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_xor::call(self, other); +} + +// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logical_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_xor_out::call(self, other, out); +} +// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & logical_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::logical_xor_out::call(self, other, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2d587f1a4a38ad074d5e84659e00e39d9db55b83 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor logical_xor(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_xor_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..94cd5b7588adbf37f7465c8a7867df5121f453ea --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
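// A sketch for logical_xor above (illustrative, assuming libtorch). Like the
// other logical_* ops it broadcasts its two operands.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::tensor({1, 0, 1, 0}, at::kBool).reshape({4, 1});
  at::Tensor b = at::tensor({1, 0}, at::kBool);  // broadcasts to {4, 2}
  std::cout << at::logical_xor(a, b) << "\n";
  return 0;
}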
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & logical_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b34d4cc730e2ad2e86142e4ca93cf73ea96e18ee --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & logical_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..784c8a544d31b65c2c38c2f9c2a1565536416688 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logical_xor(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_xor_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_xor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d37de60c4768b3cfa2b5783454871854f4c77885 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API logical_xor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_xor"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logical_xor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API logical_xor_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_xor_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API logical_xor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logical_xor"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit.h new file mode 100644 index 0000000000000000000000000000000000000000..2e1aa8d8bf26e2e3f2f8e50837f5196e58f4404c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logit(Tensor self, float? eps=None) -> Tensor +inline at::Tensor logit(const at::Tensor & self, ::std::optional eps=::std::nullopt) { + return at::_ops::logit::call(self, eps); +} + +// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!) +inline at::Tensor & logit_(at::Tensor & self, ::std::optional eps=::std::nullopt) { + return at::_ops::logit_::call(self, eps); +} + +// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logit_out(at::Tensor & out, const at::Tensor & self, ::std::optional eps=::std::nullopt) { + return at::_ops::logit_out::call(self, eps, out); +} +// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & logit_outf(const at::Tensor & self, ::std::optional eps, at::Tensor & out) { + return at::_ops::logit_out::call(self, eps, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..13605ed6e6e71eda314bf1c95565ff26468f2fa1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & logit_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt) { + return at::_ops::logit_backward_grad_input::call(grad_output, self, eps, grad_input); +} +// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & logit_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps, at::Tensor & grad_input) { + return at::_ops::logit_backward_grad_input::call(grad_output, self, eps, grad_input); +} + +// aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor +inline at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt) { + return at::_ops::logit_backward::call(grad_output, self, eps); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3b8e3b83c7ed918c028c3caec1ad22ed8c087e8d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
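// A usage sketch for at::logit above (illustrative, assuming libtorch).
// logit(p) = log(p / (1 - p)); when the optional eps is supplied, the input
// is clamped to [eps, 1 - eps] first, so exact 0/1 endpoints stay finite
// instead of mapping to -inf/inf.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor p = at::tensor({0.0, 0.25, 0.5, 1.0}, at::kDouble);
  std::cout << at::logit(p) << "\n";        // endpoints give -inf / inf
  std::cout << at::logit(p, 1e-6) << "\n";  // clamped: all values finite
  p.clamp_(0.1, 0.9);
  at::logit_(p);                            // in-place variant
  std::cout << p << "\n";
  return 0;
}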
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d62a0c3b3cfe9e8d790141618e7557591d8d5197 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c092087da591448ed5c86fd23ec519a15d12b2c4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..00dbd374fee20a5176805497976425d47cd1edb1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_logit_backward : public TensorIteratorBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps); +}; + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f111b9e02b26d573b3299fccc7ffc4fa6f0a2cbe --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
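// logit_backward is a "structured" operator: the meta() declared above only
// checks inputs and sizes the output, while a backend impl() (see
// logit_backward_native.h below) fills it in. User code simply calls the
// functional form; a sketch assuming libtorch, not part of the headers:
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor x = at::tensor({0.2, 0.5, 0.8}, at::kDouble);
  at::Tensor grad_out = at::ones_like(x);
  // d/dx logit(x) = 1 / (x * (1 - x)) when no eps clamp is applied.
  at::Tensor grad_in = at::logit_backward(grad_out, x);
  std::cout << grad_in << "\n";
  return 0;
}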
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bbce703a278631a49e704686f4e70b1db12e43cf --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_logit_backward_out : public at::meta::structured_logit_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3d5572fa834bdc1419d4b4e615f360f0ace9f5ae --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API logit_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logit_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) 
grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps, at::Tensor & grad_input); +}; + +struct TORCH_API logit_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logit_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..100579586410a79cc291d331cdf7bd7de49c9dd3 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor logit(const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_out(at::Tensor & out, const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_outf(const at::Tensor & self, ::std::optional eps, at::Tensor & out); +TORCH_API at::Tensor & logit_(at::Tensor & self, ::std::optional eps=::std::nullopt); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f6c13db351a871c8697fad465782f613ecb206b5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
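[editor note] The generated at::_ops structs above are the raw unboxed entry points; the public at::logit_backward wrapper simply forwards to their static call, and name/overload_name/schema_str mirror the native_functions.yaml registration. A sketch of calling one directly (illustrative only; ordinary code would use the wrapper):

#include <ATen/ATen.h>
#include <ATen/ops/logit_backward_ops.h>

// Dispatches exactly like at::logit_backward(grad_out, x, eps).
at::Tensor call_via_ops(const at::Tensor& grad_out, const at::Tensor& x) {
  return at::_ops::logit_backward::call(grad_out, x, /*eps=*/0.0);
}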
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor logit(const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_out(at::Tensor & out, const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_outf(const at::Tensor & self, ::std::optional eps, at::Tensor & out); +TORCH_API at::Tensor & logit_(at::Tensor & self, ::std::optional eps=::std::nullopt); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b50dd218f4db1041b7b54a59d9d66da6f9e6bf10 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & logit_(at::Tensor & self, ::std::optional eps=::std::nullopt); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2d79b57852006186ddb9574f9c69fc985fe9d903 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logit(const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_out(const at::Tensor & self, ::std::optional eps, at::Tensor & out); +TORCH_API at::Tensor & logit_(at::Tensor & self, ::std::optional eps=::std::nullopt); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ceb916c5d49f493326c02dbea3f5118182686211 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
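[editor note] The cpu/cuda dispatch headers above expose the same three surface forms of aten::logit. A brief sketch of how they differ; eps, when given, clamps the input to [eps, 1 - eps] before log(x / (1 - x)) is taken:

#include <ATen/ATen.h>

void logit_variants(at::Tensor& x) {
  at::Tensor y = at::logit(x, 1e-6);   // functional: allocates a new tensor
  at::Tensor out = at::empty_like(x);
  at::logit_out(out, x, 1e-6);         // out= variant: writes into `out`
  x.logit_(1e-6);                      // in-place: mutates x, returns x
}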
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API logit { + using schema = at::Tensor (const at::Tensor &, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logit"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logit(Tensor self, float? eps=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, ::std::optional eps); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional eps); +}; + +struct TORCH_API logit_ { + using schema = at::Tensor & (at::Tensor &, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logit_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, ::std::optional eps); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, ::std::optional eps); +}; + +struct TORCH_API logit_out { + using schema = at::Tensor & (const at::Tensor &, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logit"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, ::std::optional eps, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional eps, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace.h new file mode 100644 index 0000000000000000000000000000000000000000..af68dc2508537d8eae9628c890c1042b85cc088c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace.h @@ -0,0 +1,98 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0, at::TensorOptions options={}) { + return at::_ops::logspace::call(start, end, steps, base, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::logspace::call(start, end, steps, base, dtype, layout, device, pin_memory); +} + +// aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base=10.0, at::TensorOptions options={}) { + return at::_ops::logspace_Tensor_Tensor::call(start, end, steps, base, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::logspace_Tensor_Tensor::call(start, end, steps, base, dtype, layout, device, pin_memory); +} + +// aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base=10.0, at::TensorOptions options={}) { + return at::_ops::logspace_Tensor_Scalar::call(start, end, steps, base, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::logspace_Tensor_Scalar::call(start, end, steps, base, dtype, layout, device, pin_memory); +} + +// aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base=10.0, at::TensorOptions options={}) { + return at::_ops::logspace_Scalar_Tensor::call(start, end, steps, base, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::logspace_Scalar_Tensor::call(start, end, steps, base, dtype, layout, device, pin_memory); +} + +// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) 
out) -> Tensor(a!) +inline at::Tensor & logspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0) { + return at::_ops::logspace_out::call(start, end, steps, base, out); +} +// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) { + return at::_ops::logspace_out::call(start, end, steps, base, out); +} + +// aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logspace_out(at::Tensor & out, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base=10.0) { + return at::_ops::logspace_Tensor_Tensor_out::call(start, end, steps, base, out); +} +// aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logspace_outf(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) { + return at::_ops::logspace_Tensor_Tensor_out::call(start, end, steps, base, out); +} + +// aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logspace_out(at::Tensor & out, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base=10.0) { + return at::_ops::logspace_Tensor_Scalar_out::call(start, end, steps, base, out); +} +// aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logspace_outf(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) { + return at::_ops::logspace_Tensor_Scalar_out::call(start, end, steps, base, out); +} + +// aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logspace_out(at::Tensor & out, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base=10.0) { + return at::_ops::logspace_Scalar_Tensor_out::call(start, end, steps, base, out); +} +// aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logspace_outf(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) { + return at::_ops::logspace_Scalar_Tensor_out::call(start, end, steps, base, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f9a959ac8b96a9ddc55070a1d62f7fb3b9190243 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_compositeexplicitautograd_dispatch.h @@ -0,0 +1,36 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0, at::TensorOptions options={}); +TORCH_API at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base=10.0, at::TensorOptions options={}); +TORCH_API at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & logspace_out(at::Tensor & out, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base=10.0); +TORCH_API at::Tensor & logspace_outf(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out); +TORCH_API at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base=10.0, at::TensorOptions options={}); +TORCH_API at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & logspace_out(at::Tensor & out, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base=10.0); +TORCH_API at::Tensor & logspace_outf(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); +TORCH_API at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base=10.0, at::TensorOptions options={}); +TORCH_API at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & logspace_out(at::Tensor & out, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base=10.0); +TORCH_API at::Tensor & logspace_outf(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cf0a6fe31c3d4aaaf8162cb08a9f5b2a719ea6c2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
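[editor note] logspace.h above is the user-facing factory header; the _out/_outf pairs differ only in argument order (destination first for C++ convenience vs. schema order with out last). A brief usage sketch, assuming a standard libtorch build:

#include <ATen/ATen.h>

void logspace_demo() {
  // `steps` values spaced evenly on a log scale: base^start ... base^end.
  at::Tensor d = at::logspace(0.0, 3.0, /*steps=*/4);   // {1, 10, 100, 1000}

  at::Tensor out = at::empty({5}, at::kDouble);
  at::logspace_out(out, 0.0, 2.0, /*steps=*/5);                 // out first
  at::logspace_outf(0.0, 2.0, /*steps=*/5, /*base=*/10.0, out); // out last
}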
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & logspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0); +TORCH_API at::Tensor & logspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9c9a1134679edc2c02aa0e90c46c9430717ff5da --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & logspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0); +TORCH_API at::Tensor & logspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4784f6355fabc265d6c469efc4a1620a8af89303 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_meta_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & logspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0); +TORCH_API at::Tensor & logspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_native.h new file mode 100644 index 0000000000000000000000000000000000000000..cb6999a163d0730130581313688e631966dd1cd2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & logspace_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); +TORCH_API at::Tensor & logspace_cuda_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); +TORCH_API at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base=10.0, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & logspace_out(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out); +TORCH_API at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base=10.0, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & logspace_out(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); +TORCH_API at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base=10.0, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & logspace_out(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b06a3991742d483c0e5daac1e5ef8fc118567816 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_ops.h @@ -0,0 +1,106 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
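[editor note] The native header above declares four overload families (Scalar/Tensor combinations for the endpoints); the tensor forms accept 0-dim tensors, which allows start/end to be passed as tensors rather than host scalars. A hedged sketch of the overload selection:

#include <ATen/ATen.h>

void tensor_endpoints() {
  at::Tensor s = at::scalar_tensor(0.0);
  at::Tensor e = at::scalar_tensor(2.0);
  at::Tensor a = at::logspace(s, e, /*steps=*/3);    // Tensor_Tensor overload
  at::Tensor b = at::logspace(s, 2.0, /*steps=*/3);  // Tensor_Scalar overload
  at::Tensor c = at::logspace(0.0, e, /*steps=*/3);  // Scalar_Tensor overload
}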
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API logspace { + using schema = at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logspace"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API logspace_Tensor_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logspace"; + static constexpr const char* overload_name = "Tensor_Tensor"; + static constexpr const char* schema_str = "logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API logspace_Tensor_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logspace"; + static constexpr const char* overload_name = "Tensor_Scalar"; + static constexpr const char* schema_str = "logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API logspace_Scalar_Tensor { + using schema = at::Tensor (const at::Scalar &, const at::Tensor &, int64_t, double, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logspace"; + static constexpr const char* overload_name = "Scalar_Tensor"; + static constexpr const char* schema_str = "logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"; + static at::Tensor call(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API logspace_out { + using schema = at::Tensor & (const at::Scalar &, const at::Scalar &, int64_t, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logspace"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); +}; + +struct TORCH_API logspace_Tensor_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logspace"; + static constexpr const char* overload_name = "Tensor_Tensor_out"; + static constexpr const char* schema_str = "logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out); +}; + +struct TORCH_API logspace_Tensor_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, int64_t, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logspace"; + static constexpr const char* overload_name = "Tensor_Scalar_out"; + static constexpr const char* schema_str = "logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); +}; + +struct TORCH_API logspace_Scalar_Tensor_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, int64_t, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logspace"; + static constexpr const char* overload_name = "Scalar_Tensor_out"; + static constexpr const char* schema_str = "logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp.h new file mode 100644 index 0000000000000000000000000000000000000000..130c8755477bda64870c082a5a88ca8c326d955c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor +inline at::Tensor logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::logsumexp::call(self, dim, keepdim); +} + +// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::logsumexp_out::call(self, dim, keepdim, out); +} +// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & logsumexp_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::logsumexp_out::call(self, dim, keepdim, out); +} + +// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor +inline at::Tensor logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim=false) { + return at::_ops::logsumexp_names::call(self, dim, keepdim); +} + +// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false) { + return at::_ops::logsumexp_names_out::call(self, dim, keepdim, out); +} +// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & logsumexp_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) { + return at::_ops::logsumexp_names_out::call(self, dim, keepdim, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f8ec729a51449a1c18d5ba105937f5eef48d30f6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7610b25094236f45ea793b78587d68fe3f7de625 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
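[editor note] logsumexp, declared above, is the numerically stable form of log(sum(exp(x), dim)); the fused reduction applies the usual max-shift, so it stays finite where the naive composition overflows. A small sketch of why that matters:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor x = at::full({2}, 1000.0);
  // Stable: 1000 + log(2) ~= 1000.693.
  std::cout << at::logsumexp(x, /*dim=*/{0}).item<float>() << "\n";
  // Naive: exp(1000) overflows float to inf, so the log is inf.
  std::cout << at::log(at::sum(at::exp(x), /*dim=*/{0})).item<float>() << "\n";
  return 0;
}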
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & logsumexp_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..322dffc2fbb5f961743483b844984c9018ba91ef --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim=false); +TORCH_API at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false); +TORCH_API at::Tensor & logsumexp_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..cbcbff20f71cd510fdff0ad7008058a6aa9df03c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & logsumexp_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); +TORCH_API at::Tensor logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim=false); +TORCH_API at::Tensor & logsumexp_out(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..06d09b94c89366c76b51888a2a4ff002e8a2d63e --- /dev/null +++ 
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_ops.h @@ -0,0 +1,62 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API logsumexp { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logsumexp"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim); +}; + +struct TORCH_API logsumexp_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logsumexp"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); +}; + +struct TORCH_API logsumexp_names { + using schema = at::Tensor (const at::Tensor &, at::DimnameList, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logsumexp"; + static constexpr const char* overload_name = "names"; + static constexpr const char* schema_str = "logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::DimnameList dim, bool keepdim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim); +}; + +struct TORCH_API logsumexp_names_out { + using schema = at::Tensor & (const at::Tensor &, at::DimnameList, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::logsumexp"; + static constexpr const char* overload_name = "names_out"; + static constexpr const char* schema_str = "logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift.h new file mode 100644 index 0000000000000000000000000000000000000000..85477f5fe3c169cafd889a4685ce661c587bb1a2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__lshift___Scalar::call(self, other); +} + +// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__lshift___Tensor::call(self, other); +} + +// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__lshift___Scalar_out::call(self, other, out); +} +// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & __lshift___outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::__lshift___Scalar_out::call(self, other, out); +} + +// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__lshift___Tensor_out::call(self, other, out); +} +// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & __lshift___outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::__lshift___Tensor_out::call(self, other, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e018b18ad2ab1d2aa9bd13148584a7304142df0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __lshift___outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & __lshift___outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..29432b17165870eb047ad7c50146606efd5919c0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7771865c5432478b47eb635c97ea39778ab4a6d9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
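[editor note] The dispatch headers above expose aten::__lshift__ and its in-place companion __ilshift__, the ops behind Python's << and <<= on tensors; for integer inputs the result matches multiplication by 2^other. A small sketch (the in-place form is a Tensor method):

#include <ATen/ATen.h>

at::Tensor shift_demo() {
  at::Tensor v = at::arange(4, at::kLong);   // {0, 1, 2, 3}
  at::Tensor s = at::__lshift__(v, 2);       // {0, 4, 8, 12}
  v.__ilshift__(2);                          // in-place: v becomes {0, 4, 8, 12}
  return s;
}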
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e75d05f6734819a6c04965dabd8bcf4b3b737568 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_meta_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9d97dca8518b638b8cd24a90cddbed95c2decbce --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & __lshift___Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __lshift___Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5df12389ca8a8537c2471e028d57c977ed1689f2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + 
+// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API __lshift___Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::__lshift__"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "__lshift__.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API __lshift___Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::__lshift__"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "__lshift__.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API __ilshift___Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::__ilshift__"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API __ilshift___Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::__ilshift__"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API __lshift___Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::__lshift__"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API __lshift___Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::__lshift__"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm.h new file mode 100644 index 0000000000000000000000000000000000000000..928ddf8849851638b59da4386143c3402cad8936 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm.h @@ -0,0 +1,36 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) +inline ::std::tuple lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); +} + +// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) +inline ::std::tuple lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell.h new file mode 100644 index 0000000000000000000000000000000000000000..d139655eae5f633e7a325d87ef15cc76406ab23f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> (Tensor, Tensor) +inline ::std::tuple lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional & b_ih={}, const ::std::optional & b_hh={}) { + return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a828f2e0d24e76c399ecbf1785e216cedc2da2c4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional & b_ih={}, const ::std::optional & b_hh={}); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c1495f7aca39e2cf436e3d102f45ebbeea3ebf61 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional & b_ih={}, const ::std::optional & b_hh={}); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9f0c8bf3128e28ee68fa3d66dad5f2c25a95af94 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
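+// ---------------------------------------------------------------------------
+// [editor note] Minimal usage sketch for the functional lstm_cell binding
+// defined above; not part of the generated header. The 4*hidden_size gate
+// packing follows the usual PyTorch weight layout; all names and shapes here
+// are illustrative assumptions.
+//
+//   #include <ATen/ATen.h>
+//
+//   void lstm_cell_demo() {
+//     int64_t batch = 2, input_size = 8, hidden_size = 16;
+//     at::Tensor x    = at::randn({batch, input_size});
+//     at::Tensor h0   = at::zeros({batch, hidden_size});
+//     at::Tensor c0   = at::zeros({batch, hidden_size});
+//     at::Tensor w_ih = at::randn({4 * hidden_size, input_size});
+//     at::Tensor w_hh = at::randn({4 * hidden_size, hidden_size});
+//     auto [h1, c1] = at::lstm_cell(x, {h0, c0}, w_ih, w_hh);  // biases default to None
+//   }
+// ---------------------------------------------------------------------------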
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API lstm_cell {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::TensorList, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  static constexpr const char* name = "aten::lstm_cell";
+  static constexpr const char* overload_name = "";
+  static constexpr const char* schema_str = "lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)";
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh);
+};
+
+}} // namespace at::_ops
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9a642218aaff628ab37261d9493ddd607d36e28
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..5f1a77cbef03d6e6928b1139f792e18379a061b1
--- /dev/null
+++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor?
grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) +inline ::std::tuple,::std::vector> lstm_mps_backward(const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::lstm_mps_backward::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); +} + +// aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () +inline void lstm_mps_backward_out(at::Tensor & out0, at::TensorList out1, at::TensorList out2, const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2); +} +// aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) 
out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () +inline void lstm_mps_backward_outf(const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) { + return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..27a000c9bea2ae202910accaf1c1da45edebb7fc --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API void lstm_mps_backward_out(at::Tensor & out0, at::TensorList out1, at::TensorList out2, const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +TORCH_API void lstm_mps_backward_outf(const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d71fb6b03d19e081867cd074b18cb47ee363e0a4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API void lstm_mps_backward_out(const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..20db4dedbfc2a6f7a8d48a3562286526e8e8fed7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_mps_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
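+// ---------------------------------------------------------------------------
+// [editor note] The lstm_mps_backward_out/_outf pair declared above follows
+// torchgen's out-variant convention: because the functional op returns
+// (Tensor, Tensor[], Tensor[]), the out overload returns void and threads one
+// Tensor plus two TensorList out arguments through instead. _out takes the
+// out arguments first, _outf takes the same out arguments trailing:
+//
+//   at::lstm_mps_backward_out(out0, out1, out2, /* ...op arguments... */);
+//   at::lstm_mps_backward_outf(/* ...op arguments... */, out0, out1, out2);
+// ---------------------------------------------------------------------------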
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API lstm_mps_backward { + using schema = ::std::tuple,::std::vector> (const ::std::optional &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lstm_mps_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])"; + static ::std::tuple,::std::vector> call(const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); + static ::std::tuple,::std::vector> redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +}; + +struct TORCH_API lstm_mps_backward_out { + using schema = void (const ::std::optional &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool, at::Tensor &, at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lstm_mps_backward"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) 
out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()"; + static void call(const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2); + static void redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..443568916ddcee2e5a61a7010d45c8bb89ad4c82 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +TORCH_API ::std::tuple lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..70136704b35226156f29575e0bda150c125bae6d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
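+// ---------------------------------------------------------------------------
+// [editor note] Usage sketch for the functional lstm op (input overload);
+// not part of the generated header. Single layer, unidirectional; the
+// per-layer params order {w_ih, w_hh, b_ih, b_hh} is the standard PyTorch
+// flat-weights convention, and all names and shapes here are illustrative.
+//
+//   #include <ATen/ATen.h>
+//
+//   void lstm_demo() {
+//     int64_t T = 5, B = 2, I = 8, H = 16;
+//     at::Tensor x    = at::randn({T, B, I});           // batch_first=false: (T, B, I)
+//     at::Tensor h0   = at::zeros({1, B, H}), c0 = at::zeros({1, B, H});
+//     at::Tensor w_ih = at::randn({4 * H, I}), w_hh = at::randn({4 * H, H});
+//     at::Tensor b_ih = at::zeros({4 * H}),   b_hh = at::zeros({4 * H});
+//     auto [y, hn, cn] = at::lstm(x, {h0, c0}, {w_ih, w_hh, b_ih, b_hh},
+//                                 /*has_biases=*/true, /*num_layers=*/1,
+//                                 /*dropout=*/0.0, /*train=*/false,
+//                                 /*bidirectional=*/false, /*batch_first=*/false);
+//   }
+// ---------------------------------------------------------------------------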
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API lstm_input { + using schema = ::std::tuple (const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lstm"; + static constexpr const char* overload_name = "input"; + static constexpr const char* schema_str = "lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)"; + static ::std::tuple call(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +}; + +struct TORCH_API lstm_data { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lstm"; + static constexpr const char* overload_name = "data"; + static constexpr const char* schema_str = "lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)"; + static ::std::tuple call(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt.h new file mode 100644 index 0000000000000000000000000000000000000000..dcd62dd32d670825fe2c31db40debd62f60a0edb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::lt_Scalar_out::call(self, other, out); +} +// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::lt_Scalar_out::call(self, other, out); +} + +// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor lt(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::lt_Scalar::call(self, other); +} + +// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lt_Tensor_out::call(self, other, out); +} +// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::lt_Tensor_out::call(self, other, out); +} + +// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor lt(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lt_Tensor::call(self, other); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..df90696df5931d361757f098ddb861b828ad21a8 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2f39c1e160f1aab8eed435fc84ca45c2fb6878bf --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_cpu_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8ee8178d952a2e0e027d12c27368194b465b7d66 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_cuda_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
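+// ---------------------------------------------------------------------------
+// [editor note] Usage sketch for the lt family; not part of the generated
+// header. lt compares element-wise and produces a kBool tensor.
+//
+//   #include <ATen/ATen.h>
+//
+//   void lt_demo() {
+//     at::Tensor a = at::randn({3}), b = at::randn({3});
+//     at::Tensor m = at::lt(a, b);             // element-wise a < b, dtype kBool
+//     at::Tensor out = at::empty({0}, a.options().dtype(at::kBool));
+//     at::lt_out(out, a, 0.5);                 // out variant: resizes and fills out
+//     a.lt_(0.5);                              // in-place variant writes 0/1 into a
+//   }
+// ---------------------------------------------------------------------------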
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..921cd5d37c87447e41fc74662e635baf5d680c63 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta.h @@ -0,0 +1,32 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_lt_Scalar : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Scalar & other); +}; +struct TORCH_API structured_lt_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..04177f190653f7bf80c171f52a0f369d3659ddc6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
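+// ---------------------------------------------------------------------------
+// [editor note] lt is a structured kernel: the meta:: structs above
+// (TensorIteratorBase subclasses) own the shape/dtype computation in meta(),
+// while the matching native:: impl() halves fill in the values. Calling the
+// out variant runs both halves, so the output is resized for you:
+//
+//   at::Tensor out = at::empty({0}, a.options().dtype(at::kBool));
+//   at::lt_out(out, a, b);   // meta() resizes out to the broadcast shape, impl() writes it
+// ---------------------------------------------------------------------------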
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8eeeb73b8c83dbf64228ebdef1668e59d2205c27 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_lt_Scalar_out : public at::meta::structured_lt_Scalar { +void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out); +}; +TORCH_API at::Tensor lt_quantized_cpu(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +struct TORCH_API structured_lt_Tensor_out : public at::meta::structured_lt_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor lt_quantized_cpu(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a25d9ee0b580b76757e80f16c975f105931c23f1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
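+// ---------------------------------------------------------------------------
+// [editor note] Anatomy of the Operator.h structs that follow: name plus
+// overload_name identify the dispatcher entry (e.g. "aten::lt.Tensor"),
+// schema_str is its registered signature, and call()/redispatch() are the
+// unboxed entry points. The public at::lt is a thin wrapper over call():
+//
+//   at::Tensor r1 = at::lt(a, b);                      // public API
+//   at::Tensor r2 = at::_ops::lt_Tensor::call(a, b);   // same op, dispatcher entry
+// ---------------------------------------------------------------------------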
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API lt_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lt"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API lt_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lt"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "lt.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API lt_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lt"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API lt_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lt"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "lt.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API lt__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lt_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "lt_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API lt__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lt_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve.h new file mode 100644 index 0000000000000000000000000000000000000000..abdd7d2c70729ea66b78b1865229285df1eb4538 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lu_solve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) { + return at::_ops::lu_solve_out::call(self, LU_data, LU_pivots, out); +} +// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lu_solve_outf(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) { + return at::_ops::lu_solve_out::call(self, LU_data, LU_pivots, out); +} + +// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor +inline at::Tensor lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) { + return at::_ops::lu_solve::call(self, LU_data, LU_pivots); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7ab98d22c2613d49a80934047358aa1146c9f718 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
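+// ---------------------------------------------------------------------------
+// [editor note] Usage sketch for lu_solve; not part of the generated header.
+// The LU_data/LU_pivots inputs are assumed here to come from
+// at::linalg_lu_factor; lu_solve is the legacy spelling and the linalg_*
+// path is the maintained one.
+//
+//   #include <ATen/ATen.h>
+//
+//   void lu_solve_demo() {
+//     at::Tensor A = at::randn({3, 3});
+//     at::Tensor b = at::randn({3, 1});
+//     auto [LU, pivots] = at::linalg_lu_factor(A);
+//     at::Tensor x = at::lu_solve(b, LU, pivots);   // solves A x = b
+//   }
+// ---------------------------------------------------------------------------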
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots); +TORCH_API at::Tensor & lu_solve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots); +TORCH_API at::Tensor & lu_solve_outf(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve_native.h new file mode 100644 index 0000000000000000000000000000000000000000..949a0a64f1893d2e65cff291434650ee5f6b7f78 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots); +TORCH_API at::Tensor & lu_solve_out(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6eb434c496677f4d38c19abf431c313782e6cde1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lu_solve_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lu_solve"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out); +}; + +struct TORCH_API lu_solve { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lu_solve"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack.h new file mode 100644 index 0000000000000000000000000000000000000000..20b2a706a0d5f6238ce4e1f5ea51b22bc3dd381f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U) +inline ::std::tuple lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) { + return at::_ops::lu_unpack::call(LU_data, LU_pivots, unpack_data, unpack_pivots); +} + +// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) +inline ::std::tuple lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) { + return at::_ops::lu_unpack_out::call(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U); +} +// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) 
U) +inline ::std::tuple lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) { + return at::_ops::lu_unpack_out::call(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cb64ea5177827f5083e2578558a39576cf4e7a46 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..33a48f21e220762c6041677d52a654dffef51ad9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
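+// ---------------------------------------------------------------------------
+// [editor note] Usage sketch for lu_unpack; not part of the generated header.
+// It expands a compact LU factorization into explicit P, L, U factors.
+//
+//   #include <ATen/ATen.h>
+//
+//   void lu_unpack_demo() {
+//     at::Tensor A = at::randn({3, 3});
+//     auto [LU, pivots] = at::linalg_lu_factor(A);   // assumed factor source
+//     auto [P, L, U] = at::lu_unpack(LU, pivots);    // unpack_data/unpack_pivots default to true
+//     // P.matmul(L).matmul(U) reconstructs A up to floating-point error
+//   }
+// ---------------------------------------------------------------------------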
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true); +TORCH_API ::std::tuple lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true); +TORCH_API ::std::tuple lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e47efdf64994ed15c9a3a65809aebfce2320674 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true); +TORCH_API ::std::tuple lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true); +TORCH_API ::std::tuple lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..83f062b35992f3d3834d07f4353716d02b607599 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_lu_unpack : public at::impl::MetaBase { + + + void meta(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f70c74014fbb072a8f46bd552e1d17ec50720184 --- /dev/null +++ 
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true); +TORCH_API ::std::tuple lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true); +TORCH_API ::std::tuple lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_native.h new file mode 100644 index 0000000000000000000000000000000000000000..893ee080d4c41b34b64b2f2fe0d4a6dd89d211c0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_lu_unpack_out : public at::meta::structured_lu_unpack { +void impl(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, const at::Tensor & P, const at::Tensor & L, const at::Tensor & U); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2e76a47011919730b8de968975eced9986c02d66 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
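+// ---------------------------------------------------------------------------
+// [editor note] The out overload registered below writes into caller-provided
+// P/L/U tensors (out arguments lead in the _out spelling, trail in _outf);
+// empty tensors are resized by the structured kernel:
+//
+//   at::Tensor P = at::empty({0}), L = at::empty({0}), U = at::empty({0});
+//   at::lu_unpack_out(P, L, U, LU, pivots);
+// ---------------------------------------------------------------------------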
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API lu_unpack { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lu_unpack"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)"; + static ::std::tuple call(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots); +}; + +struct TORCH_API lu_unpack_out { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::lu_unpack"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)"; + static ::std::tuple call(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH.h new file mode 100644 index 0000000000000000000000000000000000000000..540dcc59f4eff0a55b5ac2a6f0f785adb8b993d9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..867cdb3fa0ee326410e7c93e1b27b0fcaab3e100 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor mH(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ffc7885317db1d98258672418f6dd4bea66b9eea --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor mH(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..80361fedbc8fb8f4c3c7d9d7dbaf12a3a86b24fc --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API mH { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::mH"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "mH(Tensor(a) self) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT.h new file mode 100644 index 0000000000000000000000000000000000000000..97c4bcd2bdff5e4a2663b3b530aab09ca66ac0cc --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4ee2bcf3e1d2c5545de4a7af15da37007d9bea76 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
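// Illustrative usage sketch for the mH declarations above (mT is declared
// analogously below): both surface as Tensor methods returning views over the
// last two dimensions. The demo function name is hypothetical.
#include <ATen/ATen.h>

void mh_mt_demo() {
  at::Tensor A = at::rand({2, 3, 4}, at::kComplexFloat);
  at::Tensor AH = A.mH();  // matrix conjugate-transpose: A.transpose(-2, -1).conj()
  at::Tensor AT = A.mT();  // matrix transpose: A.transpose(-2, -1)
  // Both have shape {2, 4, 3}; the "(a)" annotations in the schema mark the
  // results as aliases of self rather than fresh copies.
}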
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor mT(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT_native.h new file mode 100644 index 0000000000000000000000000000000000000000..72eac1fe8a81886f38f1e0089c8c06d6db47b372 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor mT(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cf9af48e2510a43e18f58b4a60849517af9cf3be --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mT_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API mT { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::mT"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "mT(Tensor(a) self) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..3be099c6d20d12f4997535dfb3ea4ba0c3f05c16 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor +inline at::Tensor margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean) { + return at::_ops::margin_ranking_loss::call(input1, input2, target, margin, reduction); +} + +} diff --git 
a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ea56b707a0ac0e9398f61c2b7ed8d815afa88f64 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9224f1cca0c9219cf85136bb8ceac18c87e399a3 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b3c1f549cef67e6c434c0607bda3981fac2f3f70 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
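// Illustrative usage sketch for margin_ranking_loss as declared above. It
// computes max(0, -target * (input1 - input2) + margin) elementwise and then
// applies the reduction (Mean by default). The demo function name is
// hypothetical.
#include <ATen/ATen.h>

void margin_ranking_demo() {
  at::Tensor x1 = at::rand({8});
  at::Tensor x2 = at::rand({8});
  at::Tensor y  = at::ones({8});  // +1: x1 should rank above x2; -1: the reverse
  at::Tensor loss = at::margin_ranking_loss(x1, x2, y, /*margin=*/0.5);
}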
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API margin_ranking_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::margin_ranking_loss"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor"; + static at::Tensor call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill.h new file mode 100644 index 0000000000000000000000000000000000000000..8f4509d7c0a8b6cb1f30f04e12b529400125b8e7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill.h @@ -0,0 +1,54 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor +inline at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) { + return at::_ops::masked_fill_Scalar::call(self, mask, value); +} + +// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor +inline at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) { + return at::_ops::masked_fill_Tensor::call(self, mask, value); +} + +// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & masked_fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) { + return at::_ops::masked_fill_Scalar_out::call(self, mask, value, out); +} +// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & masked_fill_outf(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) { + return at::_ops::masked_fill_Scalar_out::call(self, mask, value, out); +} + +// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & masked_fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) { + return at::_ops::masked_fill_Tensor_out::call(self, mask, value, out); +} +// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & masked_fill_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) { + return at::_ops::masked_fill_Tensor_out::call(self, mask, value, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f4d67377d710ba6fdb83e22773d4f79809e3abeb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill_outf(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..39a049b671a527fdd9c458c28d1f81ed4bd253b0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..791fbe7b0c80148cc67cf60c09d0ea9d8d370f20 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..eb04f83190a57ebe8eb34d584f3648dc2c5830d1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_meta_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_native.h new file mode 100644 index 0000000000000000000000000000000000000000..46c594559aca371cdb50c439e852a05b99c130e2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_native.h @@ -0,0 +1,33 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill_Scalar_out(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & masked_fill__cpu(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill__cuda(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor NestedTensor_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill__quantized_cpu(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill__quantized_cuda(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill_Tensor_out(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out); +TORCH_API at::Tensor & masked_fill__cpu(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill__cuda(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill__quantized_cpu(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill__quantized_cuda(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d50268a9bf4b21bc4efa7c808b3cf887777117c9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
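// Illustrative usage sketch for the masked_fill variants declared above:
// out-of-place, in-place (masked_fill_), and the out= overload. Only the
// Scalar overload is shown; the Tensor overload takes a 0-dim value tensor.
// The demo function name is hypothetical.
#include <ATen/ATen.h>

void masked_fill_demo() {
  at::Tensor self = at::zeros({2, 3});
  at::Tensor mask = self.eq(0);                         // bool mask, broadcastable to self
  at::Tensor filled = at::masked_fill(self, mask, 7.0); // returns a new tensor
  self.masked_fill_(mask, 7.0);                         // in-place variant
  at::Tensor out = at::empty_like(self);
  at::masked_fill_out(out, self, mask, 7.0);            // writes into out
}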
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API masked_fill__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_fill_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +}; + +struct TORCH_API masked_fill_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_fill"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +}; + +struct TORCH_API masked_fill__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_fill_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +}; + +struct TORCH_API masked_fill_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_fill"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +}; + +struct TORCH_API masked_fill_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_fill"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out); +}; + +struct TORCH_API masked_fill_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_fill"; + static constexpr const char* overload_name = "Tensor_out"; + static constexpr const char* schema_str = "masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter.h new file mode 100644 index 0000000000000000000000000000000000000000..2e1f1081e685dbd4f9012a167190a48facab1931 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor +inline at::Tensor masked_scatter(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) { + return at::_ops::masked_scatter::call(self, mask, source); +} + +// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & masked_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) { + return at::_ops::masked_scatter_out::call(self, mask, source, out); +} +// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & masked_scatter_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) { + return at::_ops::masked_scatter_out::call(self, mask, source, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..9e8805982540d27fee5d52db086f3d0703854721 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward.h @@ -0,0 +1,48 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor +inline at::Tensor masked_scatter_backward(const at::Tensor & grad_output, const at::Tensor & mask, at::IntArrayRef sizes) { + return at::_ops::masked_scatter_backward::call(grad_output, mask, c10::fromIntArrayRefSlow(sizes)); +} +namespace symint { + template >> + at::Tensor masked_scatter_backward(const at::Tensor & grad_output, const at::Tensor & mask, at::IntArrayRef sizes) { + return at::_ops::masked_scatter_backward::call(grad_output, mask, c10::fromIntArrayRefSlow(sizes)); + } +} + +// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor +inline at::Tensor masked_scatter_backward_symint(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) { + return at::_ops::masked_scatter_backward::call(grad_output, mask, sizes); +} +namespace symint { + template >> + at::Tensor masked_scatter_backward(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) { + return at::_ops::masked_scatter_backward::call(grad_output, mask, sizes); + } +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0c660362710ed9c5cb929ef3c466e4e1b49ac105 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
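// Illustrative usage sketch for masked_scatter as declared above: elements of
// source are copied, in row-major order, into the positions of self where
// mask is true. The demo function name is hypothetical.
#include <ATen/ATen.h>

void masked_scatter_demo() {
  at::Tensor self = at::full({2, 3}, -1.0);
  at::Tensor mask = at::eye(2, 3).to(at::kBool);  // true on the diagonal
  at::Tensor source = at::arange(2, at::kFloat);  // needs >= mask.sum() elements
  at::Tensor result = at::masked_scatter(self, mask, source);
  // result: [[0, -1, -1], [-1, 1, -1]]
}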
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor masked_scatter_backward(const at::Tensor & grad_output, const at::Tensor & mask, at::IntArrayRef sizes); +TORCH_API at::Tensor masked_scatter_backward_symint(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b30b8328e0b95d3936bd6594ff051be5be27cdcf --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor masked_scatter_backward_symint(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..107f71523c9fd72bffb1f7558818848c44253f52 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API masked_scatter_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_scatter_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..52225f1698ab5f577980a103947dc9577c883cc0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor masked_scatter(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +TORCH_API at::Tensor & masked_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +TORCH_API at::Tensor & masked_scatter_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3430d0a8e58e6d22121550952e31be8044795c37 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
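// Illustrative sketch for masked_scatter_backward as declared above. It is
// normally invoked by autograd rather than user code: it gathers grad_output
// at the masked positions into a tensor with the forward pass's source sizes.
// The plain IntArrayRef overload converts through c10::fromIntArrayRefSlow,
// while the _symint variant forwards symbolic sizes untouched. The demo
// function name is hypothetical.
#include <ATen/ATen.h>

void masked_scatter_backward_demo() {
  at::Tensor grad = at::ones({2, 3});
  at::Tensor mask = at::eye(2, 3).to(at::kBool);
  // source in the forward pass had sizes {2}, so its gradient does too:
  at::Tensor grad_source = at::masked_scatter_backward(grad, mask, {2});
}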
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c9ff2c639b28b508fb6e75d5a93e6b47ddd8c477 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c2f20fadc3dd07f421c506318d6a7135c75788cb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_native.h new file mode 100644 index 0000000000000000000000000000000000000000..769fac5b56e84f6a3402eca3f965145a65e23578 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor masked_scatter(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +TORCH_API at::Tensor & masked_scatter_out(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out); +TORCH_API at::Tensor & masked_scatter__cpu(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +TORCH_API at::Tensor & masked_scatter__cuda(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..686cd6d818ac90b6ac79c9e0f7e427873bb0f3dd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_ops.h @@ -0,0 +1,51 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API masked_scatter_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_scatter_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "masked_scatter_(Tensor(a!) 
self, Tensor mask, Tensor source) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +}; + +struct TORCH_API masked_scatter { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_scatter"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +}; + +struct TORCH_API masked_scatter_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_scatter"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select.h new file mode 100644 index 0000000000000000000000000000000000000000..f1ab053c55f6695d5c89aee256d84f674de484ba --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & masked_select_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask) { + return at::_ops::masked_select_out::call(self, mask, out); +} +// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & masked_select_outf(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) { + return at::_ops::masked_select_out::call(self, mask, out); +} + +// aten::masked_select(Tensor self, Tensor mask) -> Tensor +inline at::Tensor masked_select(const at::Tensor & self, const at::Tensor & mask) { + return at::_ops::masked_select::call(self, mask); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..6a04bcbfced9573d3427b265751ba01769da7c7e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor +inline at::Tensor masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) { + return at::_ops::masked_select_backward::call(grad, input, mask); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6e09228250fa2131e4e1c6a3bbbb9ce072848553 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
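// Illustrative usage sketch for masked_select as declared above: the result
// is always a freshly allocated 1-D tensor holding the elements of self at
// the true positions of the (broadcastable) mask. The demo function name is
// hypothetical.
#include <ATen/ATen.h>

void masked_select_demo() {
  at::Tensor self = at::arange(6, at::kFloat).reshape({2, 3});
  at::Tensor mask = self.gt(2.0);
  at::Tensor picked = at::masked_select(self, mask);  // [3, 4, 5]
  at::Tensor out = at::empty({0}, at::kFloat);
  at::masked_select_out(out, self, mask);             // out= variant resizes out
}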
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..07bcf77c612da4171db16632d037a0c2a9bc3c22 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8c1d1d842cf8410d37b7bc7f76658466ff62da72 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API masked_select_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_select_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor"; + static at::Tensor call(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cc97d901e679b75ff979c0b1f6b4710c33a29c91 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor masked_select(const at::Tensor & self, const at::Tensor & mask); +TORCH_API at::Tensor & masked_select_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask); +TORCH_API at::Tensor & masked_select_outf(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2bf8cc436723bc8d0ed4d6a0bce951889fe7b40d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
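// Illustrative sketch for masked_select_backward as declared above: it routes
// the 1-D gradient of masked_select back into a tensor shaped like the input,
// with zeros where the mask was false. The demo function name is hypothetical.
#include <ATen/ATen.h>

void masked_select_backward_demo() {
  at::Tensor input = at::arange(6, at::kFloat).reshape({2, 3});
  at::Tensor mask = input.gt(2.0);  // selects elements 3, 4, 5
  at::Tensor grad = at::ones({3});  // one gradient slot per selected element
  at::Tensor grad_input = at::masked_select_backward(grad, input, mask);
  // grad_input: [[0, 0, 0], [1, 1, 1]]
}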
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor masked_select(const at::Tensor & self, const at::Tensor & mask); +TORCH_API at::Tensor & masked_select_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask); +TORCH_API at::Tensor & masked_select_outf(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_native.h new file mode 100644 index 0000000000000000000000000000000000000000..915ae34ec84befc83af9f904a2092eeb8ef465b0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor masked_select_cpu(const at::Tensor & self, const at::Tensor & mask); +TORCH_API at::Tensor & masked_select_out_cpu(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out); +TORCH_API at::Tensor masked_select_cuda(const at::Tensor & self, const at::Tensor & mask); +TORCH_API at::Tensor & masked_select_out_cuda(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f361a4611a2f26b840d3efb6774f4fdaa8d4e6b7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API masked_select_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_select"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out); +}; + +struct TORCH_API masked_select { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::masked_select"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "masked_select(Tensor self, Tensor mask) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & mask); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul.h new file mode 100644 index 0000000000000000000000000000000000000000..a84c8bac23d5c3836c550131974fd1dcd11e0611 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::matmul(Tensor self, Tensor other) -> Tensor +inline at::Tensor matmul(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::matmul::call(self, other); +} + +// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::matmul_out::call(self, other, out); +} +// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::matmul_out::call(self, other, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..0f62735aaa8519a750593a7324a193342da6125d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor) +inline ::std::tuple matmul_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array mask) { + return at::_ops::matmul_backward::call(grad, self, other, mask); +} + +// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) { + return at::_ops::matmul_backward_out::call(grad, self, other, mask, out0, out1); +} +// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_outf(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::matmul_backward_out::call(grad, self, other, mask, out0, out1); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ec96e4a1b913da103868f80c282137e744e61981 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_outf(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d8b5f2f5d5da1d7bcc3b5d9b518c2376c2acdf95 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> matmul_backward_nested(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..187cfaf382711fe337330499040d5e90bb95015b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API matmul_backward { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::array<bool,2>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::matmul_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask); +}; + +struct TORCH_API matmul_backward_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::array<bool,2>, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::matmul_backward"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7ed9a0f0c5deee9f6990b053c7610f6e2562dde2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor matmul(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_native.h new file mode 100644 index 0000000000000000000000000000000000000000..efe89f5b0f0056e41cac05dc5e67359e07080fdb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor matmul(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & matmul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor matmul_nested(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & matmul_out_nested(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..57e6cf866db989a99ec384ed4c0526dfbe2a3a47 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API matmul { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::matmul"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "matmul(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API matmul_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::matmul"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "matmul.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H.h new file mode 100644 index 0000000000000000000000000000000000000000..78ba4fc13e676a95e838ac6e8281865b3103e357 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..50dfbfbfba565a6a4bc500ccd95c09b172dbccc5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
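The matmul headers above show the pattern every operator follows: the public inline function is a thin shim over at::_ops::<op>::call, and the *_outf ("final") spelling only reorders the out argument to match the schema. A hedged sketch, with names and shapes of our choosing:

    #include <ATen/ATen.h>

    void matmul_demo() {
      at::Tensor a = at::randn({2, 3});
      at::Tensor b = at::randn({3, 4});
      at::Tensor c = at::matmul(a, b);   // same as at::_ops::matmul::call(a, b)
      at::Tensor out = at::empty({2, 4});
      at::matmul_out(out, a, b);         // out-first C++ convention
      at::matmul_outf(a, b, out);        // schema-order convention; same underlying op
    }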
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor matrix_H(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_native.h new file mode 100644 index 0000000000000000000000000000000000000000..65c4a66dbddef66fd784446b41beea93cc7c5140 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor matrix_H(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..179cb9e18af93de11f94d4b0df28b9aa1978bba5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API matrix_H { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::matrix_H"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "matrix_H(Tensor(a) self) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp.h new file mode 100644 index 0000000000000000000000000000000000000000..d33f29c379c34ad77283814c6b37aa6d2e186328 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::matrix_exp(Tensor self) -> Tensor +inline at::Tensor matrix_exp(const at::Tensor & self) { + return at::_ops::matrix_exp::call(self); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..ad30f4580719c559a9d77d28cfc5babb3a5c812b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor +inline at::Tensor matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad) { + return at::_ops::matrix_exp_backward::call(self, grad); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c6cc1c8d66e8b0fdeedfc36317d10143199fa30b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
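The matrix_H schema above, "matrix_H(Tensor(a) self) -> Tensor(a)", carries an alias annotation: the result may share storage with the input. It is the conjugate transpose, exposed only as a method (hence the empty Function.h body earlier). An illustrative sketch, assuming <ATen/ATen.h>:

    #include <ATen/ATen.h>

    void matrix_H_demo() {
      at::Tensor m = at::randn({3, 3}, at::kComplexFloat);
      at::Tensor h = m.matrix_H();       // conjugate transpose, returned as a view
      at::Tensor h2 = m.mT().conj();     // equivalent composition
      at::Tensor e = at::matrix_exp(m);  // matrix exponential, declared just above
    }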
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fc8744ed1714afc46d518383eeaf99f41013f3a9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2f63187e8c2081f31ec49c41ab3782402830f18c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API matrix_exp_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::matrix_exp_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "matrix_exp_backward(Tensor self, Tensor grad) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & grad); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..10e184c5a7292d6d56b022a19d861b05b6e13e12 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor matrix_exp(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8b5bffb06369a87d9ff9070a2e87e7b2b9637257 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor matrix_exp(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..58b4dd153b2bfdf77a1f6533ac9c0a4a3782137c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API matrix_exp { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::matrix_exp"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "matrix_exp(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power.h new file mode 100644 index 0000000000000000000000000000000000000000..15881565aa375643386631e6e4f08b01ff6a1d6d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::matrix_power(Tensor self, int n) -> Tensor +inline at::Tensor matrix_power(const at::Tensor & self, int64_t n) { + return at::_ops::matrix_power::call(self, n); +} + +// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & matrix_power_out(at::Tensor & out, const at::Tensor & self, int64_t n) { + return at::_ops::matrix_power_out::call(self, n, out); +} +// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & matrix_power_outf(const at::Tensor & self, int64_t n, at::Tensor & out) { + return at::_ops::matrix_power_out::call(self, n, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..88f400387f104d9b806169e15e1a1ccfd8807b18 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor matrix_power(const at::Tensor & self, int64_t n); +TORCH_API at::Tensor & matrix_power_out(at::Tensor & out, const at::Tensor & self, int64_t n); +TORCH_API at::Tensor & matrix_power_outf(const at::Tensor & self, int64_t n, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f365ae4930ab6653957866f62f4e5d31a4025a4e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor matrix_power(const at::Tensor & self, int64_t n); +TORCH_API at::Tensor & matrix_power_out(const at::Tensor & self, int64_t n, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..dffbcf6f96c52d4c68a0891188065c26155b7392 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
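matrix_power(Tensor self, int n) computes the n-th matrix power by repeated matrix multiplication. A small sketch of the functions declared above (illustrative variable names):

    #include <ATen/ATen.h>

    void matrix_power_demo() {
      at::Tensor m = at::randn({3, 3});
      at::Tensor p3 = at::matrix_power(m, 3);   // equivalent to m.matmul(m).matmul(m)
      at::Tensor id = at::matrix_power(m, 0);   // n = 0 yields the identity matrix
      at::Tensor out = at::empty({3, 3});
      at::matrix_power_outf(m, 3, out);         // out variant writes into `out`
    }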
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API matrix_power { + using schema = at::Tensor (const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::matrix_power"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "matrix_power(Tensor self, int n) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t n); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n); +}; + +struct TORCH_API matrix_power_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::matrix_power"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max.h new file mode 100644 index 0000000000000000000000000000000000000000..2bb326c53acd2fdb6313bc9b357975d66d5486dc --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max.h @@ -0,0 +1,82 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::max_dim::call(self, dim, keepdim); +} + +// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::max_dim_max::call(self, dim, keepdim, max, max_values); +} +// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) { + return at::_ops::max_dim_max::call(self, dim, keepdim, max, max_values); +} + +// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::max_names_dim::call(self, dim, keepdim); +} + +// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!)
indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::max_names_dim_max::call(self, dim, keepdim, max, max_values); +} +// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) { + return at::_ops::max_names_dim_max::call(self, dim, keepdim, max, max_values); +} + +// aten::max(Tensor self) -> Tensor +inline at::Tensor max(const at::Tensor & self) { + return at::_ops::max::call(self); +} + +// aten::max.other(Tensor self, Tensor other) -> Tensor +inline at::Tensor max(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::max_other::call(self, other); +} + +// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::max_out::call(self, other, out); +} +// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::max_out::call(self, other, out); +} + +// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::max_unary_out::call(self, out); +} +// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::max_unary_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2629caf8a0300efb62320ca9b85151be6687b5dd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c06dee8eb07a091054df33e776d9b1046efbf621 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); +TORCH_API at::Tensor max(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & max_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..773c588deda076c45abfaefd08924c8fd3525ac7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); +TORCH_API at::Tensor max(const at::Tensor & self); +TORCH_API at::Tensor & max_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3741e1fcc10dc6bae759a6ddc1e950c2f79a7123 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); +TORCH_API at::Tensor max(const at::Tensor & self); +TORCH_API at::Tensor & max_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..2155b185276f7044dc86af4ee563bfd6fdd2a506 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_max_dim : public at::impl::MetaBase { + + template <bool DIM = false> + struct TORCH_API precompute_out { + + precompute_out<true> set_dim(int64_t value) { + static_assert(DIM == false, "dim already set"); + precompute_out<true> ret; +ret.dim = value; +return ret; + } + + int64_t dim; + }; + using meta_return_ty = precompute_out <true>; + meta_return_ty meta(const at::Tensor & self, int64_t dim, bool keepdim); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta_dispatch.h
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9680e33f3f4f259b6cf3f0cd76260b0791c55cbf --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8fac3223e3eac29b0a94175a695094f16544cb7b --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_native.h @@ -0,0 +1,32 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_max_out : public at::meta::structured_max_dim { +void impl(const at::Tensor & self, int64_t dim, bool keepdim, const at::Tensor & max, const at::Tensor & max_values); +}; +TORCH_API ::std::tuple<at::Tensor,at::Tensor> qmax(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); +TORCH_API at::Tensor max(const at::Tensor & self); +TORCH_API at::Tensor & max_unary_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor max_quantized_cpu(const at::Tensor & self); +TORCH_API at::Tensor & max_quantized_unary_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor max(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & max_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..77d780154fb4f1f256188b4966dc68b1acc03ad9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_ops.h @@ -0,0 +1,106 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h
+ +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_dim { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max"; + static constexpr const char* overload_name = "dim"; + static constexpr const char* schema_str = "max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, int64_t dim, bool keepdim); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim); +}; + +struct TORCH_API max_dim_max { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max"; + static constexpr const char* overload_name = "dim_max"; + static constexpr const char* schema_str = "max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); +}; + +struct TORCH_API max_names_dim { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::Dimname, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max"; + static constexpr const char* overload_name = "names_dim"; + static constexpr const char* schema_str = "max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::Dimname dim, bool keepdim); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim); +}; + +struct TORCH_API max_names_dim_max { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Dimname, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max"; + static constexpr const char* overload_name = "names_dim_max"; + static constexpr const char* schema_str = "max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!)
indices)"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); +}; + +struct TORCH_API max { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API max_other { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max"; + static constexpr const char* overload_name = "other"; + static constexpr const char* schema_str = "max.other(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API max_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API max_unary_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max"; + static constexpr const char* overload_name = "unary_out"; + static constexpr const char* schema_str = "max.unary_out(Tensor self, *, Tensor(a!)
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d.h new file mode 100644 index 0000000000000000000000000000000000000000..de0c65ffcd1c6837947b9f0bd8523ffecf5be9f2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor +inline at::Tensor max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ee83af7db0996e8fea9dd3db1807496bda078dc7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
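The aten::max overloads declared above split into a full reduction (max), a dimension reduction returning (values, indices) pairs (max.dim and max.names_dim), and an elementwise binary maximum (max.other). A minimal sketch, with illustrative inputs:

    #include <ATen/ATen.h>

    void max_demo() {
      at::Tensor x = at::randn({4, 5});
      at::Tensor global_max = at::max(x);              // 0-dim result
      auto [values, indices] = at::max(x, /*dim=*/1);  // per-row max and argmax
      at::Tensor ew = at::max(x, at::randn({4, 5}));   // elementwise maximum
    }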
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..88c62b1077a196fba48ead159cf35b19e19e1e55 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1acf33a6f3a89372b51b13d276545afdd14f0ea6 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_pool1d { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool1d"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices.h new file mode 100644 index 0000000000000000000000000000000000000000..a82b337971fc68d31476ab18fe4d3174a7ab78a4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) +inline ::std::tuple max_pool1d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool1d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9921979cc2be89f4db4f8a6cb9ac6b396db3b50c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9bc817e005efa54b8e788c498396392795df9f28 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3f3b024c9eab2d1a6a6a9a11bb95e315d6167f66 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
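// ---------------------------------------------------------------------------
// Editorial example (not generated code): the *_with_indices variant returns
// the pooled values together with the argmax indices, as a sketch:
//
//   auto [vals, idx] = at::max_pool1d_with_indices(x, /*kernel_size=*/{3},
//                                                  /*stride=*/{2});
//   // idx has the same shape as vals and dtype kLong; it records where each
//   // maximum came from, which is what the pooling backward pass consumes.
// ---------------------------------------------------------------------------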
+#include + +namespace at { +namespace _ops { + +struct TORCH_API max_pool1d_with_indices { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool1d_with_indices"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d.h new file mode 100644 index 0000000000000000000000000000000000000000..e4b633153e4885d2139c2d31b5e798464b88e80c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor +inline at::Tensor max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..bf69030ff48fa2e1f7f577fb08ce588e5d61d34a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor +inline at::Tensor max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode); +} + +// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], 
int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d_backward_out::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out); +} +// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::max_pool2d_backward_out::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dc219d66a9ccc27ff012654661a59012a6091f80 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & max_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API at::Tensor & max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..72d39c3857c00141c3082534657b3e275cb15c7d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & max_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d3c51665c2a7c7341ff41a04495d9355774d6df9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_pool2d_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool2d_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +struct TORCH_API max_pool2d_backward_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool2d_backward"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0da0108e932711216fc2381fb6433058ba00dc37 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
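// ---------------------------------------------------------------------------
// Editorial note (not generated code): every at::_ops struct in these headers
// pairs a call() with a redispatch(). call() enters the dispatcher from the
// top, so the public wrapper and the schema entry point are equivalent:
//
//   at::Tensor y1 = at::max_pool2d(x, {2, 2});
//   at::Tensor y2 = at::_ops::max_pool2d::call(x, {2, 2}, {}, 0, 1, false);
//
// redispatch() additionally takes a c10::DispatchKeySet and is used by
// kernels that re-enter the dispatcher below already-handled keys.
// ---------------------------------------------------------------------------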
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..816c8a69fd2e008b020d74acc25d90fb0307a640 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e16b64a372fb58c14f432eea72ad7b05cfff7e01 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + +struct TORCH_API max_pool2d { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool2d"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices.h new file mode 100644 index 0000000000000000000000000000000000000000..1e0f90a4e101e86995175678bb8ff60b91ac0158 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d_with_indices_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); +} +// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); +} + +// aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..d6727d69bccbef642f8ed81539e7969d354ee6ff --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); +} +// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & max_pool2d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); +} + +// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor +inline at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3ff969183a4b7be6dac728dbc965f73e89b12012 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6ea1ec241ee1d96df0745a5c868a2f1c07c264d2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool2d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f90d59797e717c75d7e77f85dab5f0a2113a7df5 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
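// ---------------------------------------------------------------------------
// Editorial note (not generated code): the per-backend headers declare the
// same signatures inside at::cpu, at::cuda, and at::meta. These entry points
// skip the dispatcher and call the backend kernel directly, e.g. (sketch):
//
//   auto [vals, idx] = at::cpu::max_pool2d_with_indices(cpu_input, {2, 2});
//
// The caller is responsible for the tensor actually living on that device;
// no autograd or device-transfer handling happens on this path.
// ---------------------------------------------------------------------------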
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool2d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..1c5465668726ff0207ef8db329ecc1725a39acc2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_max_pool2d_with_indices_backward : public at::impl::MetaBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +}; + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1d80928e8e32b3eb7dfe4b478502a54371cc55b3 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool2d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..052723b0529d9f204b4949358525bec3b14fd70e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_max_pool2d_with_indices_backward_out_cpu : public at::meta::structured_max_pool2d_with_indices_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, const at::Tensor & grad_input); +}; +struct TORCH_API structured_max_pool2d_with_indices_backward_out_cuda : public at::meta::structured_max_pool2d_with_indices_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c9118b0cf4888b4ae1ce6bd2a58e9639fab1930f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
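// ---------------------------------------------------------------------------
// Editorial note (not generated code): for out-variants the generator emits
// two C++ spellings that route to the same schema overload ("grad_input"
// here): *_out takes the output buffer first, *_outf takes it last with the
// same remaining arguments. As a sketch, these two calls are equivalent:
//
//   at::max_pool2d_with_indices_backward_out(grad_in, grad_out, x,
//       {2, 2}, {2, 2}, 0, 1, false, idx);
//   at::max_pool2d_with_indices_backward_outf(grad_out, x,
//       {2, 2}, {2, 2}, 0, 1, false, idx, grad_in);
// ---------------------------------------------------------------------------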
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_pool2d_with_indices_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool2d_with_indices_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); +}; + +struct TORCH_API max_pool2d_with_indices_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool2d_with_indices_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..90cb462e80eb3c6fb0f53cd3da058af12e413546 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..04ea12298a75b2eef1df0122bbbdd74cb3dbdf69 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9bd4d715527d294ee7182d85a264379ef27f72d9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..19d566a4d7acf9db9997ec341fe193cb7ce94442 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_max_pool2d_with_indices : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6d7a57759cc4495ee539be01ebd6afc315f3db2d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
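// ---------------------------------------------------------------------------
// Editorial note (not generated code): structured ops split shape/dtype
// checking (the meta() declared above) from the backend impl(). One practical
// use of this split is data-free shape inference via the meta device, as a
// sketch:
//
//   at::Tensor probe = at::empty({1, 3, 32, 32},
//                                at::TensorOptions().device(at::kMeta));
//   auto [v, i] = at::max_pool2d_with_indices(probe, {2, 2});
//   // v.sizes() == {1, 3, 16, 16}; no data is allocated on the meta device
// ---------------------------------------------------------------------------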
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5382df32fbd0cd3382d68979d2d164b5241f50e2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_max_pool2d_with_indices_out_cpu : public at::meta::structured_max_pool2d_with_indices { +void impl(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & out, const at::Tensor & indices); +}; +struct TORCH_API structured_max_pool2d_with_indices_out_cuda : public at::meta::structured_max_pool2d_with_indices { +void impl(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & out, const at::Tensor & indices); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..55919a51d6933c0f0a5fc2224ca2fcd3d78d7727 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + +struct TORCH_API max_pool2d_with_indices_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool2d_with_indices"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); +}; + +struct TORCH_API max_pool2d_with_indices { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool2d_with_indices"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d.h new file mode 100644 index 0000000000000000000000000000000000000000..bb9c9edef823c469d9208bef2e744230afd62efc --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor +inline at::Tensor max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h 
b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cb106bc85639df0a72a06927a60a95d2d0990068 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a728f96aff3558ed0b68e21c5f7fd2024e19a5be --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a9beb2791c57d3a1cd43a52b20926f3dc3a5c96d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_ops.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_pool3d { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool3d"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices.h new file mode 100644 index 0000000000000000000000000000000000000000..301e616b88b06e64c48116ffb7489567537db1fa --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool3d_with_indices_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); +} +// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) { + return at::_ops::max_pool3d_with_indices_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); +} + +// aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool3d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..fbc2f4df13e45aef1952f91a2f552585e6205853 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool3d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); +} +// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & max_pool3d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::max_pool3d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); +} + +// aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor +inline at::Tensor max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool3d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c0d8ad619529da0ede5335542a72bb371da65c56 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool3d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9a65e13304eee5a0cc875109a15a61596338787a --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
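The backward operator consumes the argmax indices recorded by the forward pass, together with the same pooling parameters. Autograd normally drives it, but it can be called directly; an illustrative sketch (assuming libtorch; shapes and the all-ones upstream gradient are arbitrary, and kernel/stride/padding/dilation must match the forward call):

    #include <ATen/ATen.h>
    #include <tuple>

    int main() {
      at::Tensor x = at::randn({1, 1, 4, 4, 4});
      auto fwd = at::max_pool3d_with_indices(x, {2, 2, 2});
      at::Tensor out = std::get<0>(fwd);
      at::Tensor idx = std::get<1>(fwd);
      at::Tensor grad_out = at::ones_like(out);
      // Same pooling parameters as the forward call above.
      at::Tensor grad_in = at::max_pool3d_with_indices_backward(
          grad_out, x, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {1, 1, 1},
          /*ceil_mode=*/false, idx);
      return 0;
    }
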
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool3d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c25f1a07686b5f96874b7b6ad3cfb34131a66fed --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor max_pool3d_with_indices_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool3d_with_indices_backward_out_cpu(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); +TORCH_API at::Tensor max_pool3d_with_indices_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +TORCH_API at::Tensor & max_pool3d_with_indices_backward_out_cuda(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..303d1ad397eca3f70ae222f7a01ef723b3a38942 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any 
types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_pool3d_with_indices_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool3d_with_indices_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); +}; + +struct TORCH_API max_pool3d_with_indices_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool3d_with_indices_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..70cd5af6a0d02dddaf1155bae037fb60fe6153a1 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include 
+#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..597459df1e8d0099a79baaab8e3bb01040d3ef34 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4f8056e945d5ef8cd6dd9f034257b858b59f5f41 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..48184a2f16e8cc669aaac83187955b4390eef056 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
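The with_indices variant returns the pooled values and the flat argmax indices together as a ::std::tuple<at::Tensor,at::Tensor>, which is what the reconstructed schemas below encode. A short sketch (assuming libtorch; shapes are illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::randn({2, 4, 8, 8, 8});
      // C++17 structured bindings unpack the (values, indices) tuple.
      auto [values, indices] = at::max_pool3d_with_indices(x, {2, 2, 2});
      // indices holds int64 positions, later consumable by max_unpool3d.
      return 0;
    }
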
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_pool3d_with_indices_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool3d_with_indices"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); +}; + +struct TORCH_API max_pool3d_with_indices { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool3d_with_indices"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d.h new file mode 100644 index 0000000000000000000000000000000000000000..aac73bdb036b0b62c3b3fe47af9c4182dfd5d6be --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d.h @@ -0,0 +1,92 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>> + at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), out); + } +} + +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::max_unpool2d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>> + at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::max_unpool2d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), out); + } +} + +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) { + return at::_ops::max_unpool2d_out::call(self, indices, output_size, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>> + at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) { + return at::_ops::max_unpool2d_out::call(self, indices, output_size, out); + } +} + +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool2d_symint_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::max_unpool2d_out::call(self, indices, output_size, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>> + at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::max_unpool2d_out::call(self, indices, output_size, out); + } +} + +// aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor +inline at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d::call(self, indices, c10::fromIntArrayRefSlow(output_size)); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>> + at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d::call(self, indices, c10::fromIntArrayRefSlow(output_size)); + } +} + +// aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor +inline at::Tensor max_unpool2d_symint(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) { + return at::_ops::max_unpool2d::call(self, indices, output_size); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>> + at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) { + return at::_ops::max_unpool2d::call(self, indices, output_size); + } +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ba431dc501e925bbecfbf918f8bf3b86504516e7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
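max_unpool2d scatters the pooled values back to their recorded positions and zero-fills everywhere else, so a pool/unpool round trip preserves only the maxima. A brief sketch (assuming libtorch; shapes are illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::randn({1, 1, 8, 8});
      auto [pooled, idx] = at::max_pool2d_with_indices(x, {2, 2});
      // output_size names the unpooled spatial size {H, W}.
      at::Tensor restored = at::max_unpool2d(pooled, idx, {8, 8});
      return 0;
    }
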
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size); +TORCH_API at::Tensor max_unpool2d_symint(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size); +TORCH_API at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size); +TORCH_API at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor & max_unpool2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size); +TORCH_API at::Tensor & max_unpool2d_symint_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..181d07fe0b6c14146025bc53aba090d3862a4cbb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size); +TORCH_API at::Tensor max_unpool2d_symint(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size); +TORCH_API at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size); +TORCH_API at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor & max_unpool2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size); +TORCH_API at::Tensor & max_unpool2d_symint_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3d456a4c064bccd975c8e29e43538a5ee53235e3 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor max_unpooling2d_forward_cpu(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size); +TORCH_API at::Tensor & max_unpooling2d_forward_out_cpu(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor max_unpooling2d_forward_cuda(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size); +TORCH_API at::Tensor & max_unpooling2d_forward_out_cuda(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ee13418ff7d90e8bcb9812c7cb650f4a23737c25 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_unpool2d_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_unpool2d"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out); +}; + +struct TORCH_API max_unpool2d { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_unpool2d"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d.h new file mode 100644 index 0000000000000000000000000000000000000000..5a2d7b9c2f65532ec470bb6872ce4bc98c5b028d --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d.h @@ -0,0 +1,92 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>> + at::Tensor & max_unpool3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding, out); + } +} + +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool3d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::max_unpool3d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>> + at::Tensor & max_unpool3d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::max_unpool3d_out::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding, out); + } +} + +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d_out::call(self, indices, output_size, stride, padding, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>> + at::Tensor & max_unpool3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d_out::call(self, indices, output_size, stride, padding, out); + } +} + +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_unpool3d_symint_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::max_unpool3d_out::call(self, indices, output_size, stride, padding, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>> + at::Tensor & max_unpool3d_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::max_unpool3d_out::call(self, indices, output_size, stride, padding, out); + } +} + +// aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor +inline at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>> + at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d::call(self, indices, c10::fromIntArrayRefSlow(output_size), stride, padding); + } +} + +// aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor +inline at::Tensor max_unpool3d_symint(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>> + at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding); + }
+} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..24efbc157c4a474569e90b9c1241265e267d5efd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor max_unpool3d_symint(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & max_unpool3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & max_unpool3d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & max_unpool3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & max_unpool3d_symint_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f5ccf388e5c9806a4babb04273bab8c65a836607 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
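The at::symint:: templates above let a caller select the concrete-shape (int64_t) or symbolic-shape (c10::SymInt) overload explicitly via the template argument when the argument type alone would be ambiguous; the plain wrappers remain the common path. A sketch of the concrete-shape path (assuming libtorch; note max_unpool3d additionally takes the stride and padding that were used when pooling):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::randn({1, 1, 8, 8, 8});
      auto [pooled, idx] = at::max_pool3d_with_indices(x, {2, 2, 2});
      at::Tensor restored =
          at::max_unpool3d(pooled, idx, {8, 8, 8}, {2, 2, 2}, {0, 0, 0});
      return 0;
    }
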
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor max_unpool3d_symint(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & max_unpool3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & max_unpool3d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & max_unpool3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & max_unpool3d_symint_outf(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..85b946fa1d9efd43e4699315d041a8bcb04bd9a9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor max_unpooling3d_forward_cpu(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & max_unpooling3d_forward_out_cpu(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor max_unpooling3d_forward_cuda(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API at::Tensor & max_unpooling3d_forward_out_cuda(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e59917fd119f177992cc6f29f85979569edcb185 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_unpool3d_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_unpool3d"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out); +}; + +struct TORCH_API max_unpool3d { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, at::IntArrayRef, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_unpool3d"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum.h new file mode 100644 index 0000000000000000000000000000000000000000..f0c7beaf2c85e92ae1137c080ca39a5aad92eca2 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::maximum(Tensor self, Tensor other) -> Tensor +inline at::Tensor maximum(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::maximum::call(self, other); +} + +// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::maximum_out::call(self, other, out); +} +// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::maximum_out::call(self, other, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bc26d25fb1ba658adf0ad9aad3d5c2f5a0c1f349 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e8888437aec7d28a6a7e1095540005ab586533bd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
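at::maximum is the elementwise (broadcasting) maximum of two tensors; maximum_out writes into a preallocated result, and the *_outf spelling only reorders the arguments so out comes last. A small sketch (assuming libtorch):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor a = at::rand({3});
      at::Tensor b = at::rand({3});
      at::Tensor c = at::maximum(a, b);    // fresh result tensor
      at::Tensor out = at::empty_like(a);
      at::maximum_out(out, a, b);          // writes into out, returns out
      return 0;
    }
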
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6244dbc3d042b5d1a0f52d34db577ce73960f091 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..e910c06bbf4047927d889e39bd0971bd2dcea6c9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_maximum : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5f5d690fbef2c8ffc4b27e0dd37ebddda29766e9 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b4e888b5b0f308b7ee731cbc00071c74c42f2173 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_maximum_out : public at::meta::structured_maximum { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..616708858ab658e85abe29fcd76c24aa4dcab7a4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API maximum { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::maximum"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "maximum(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API maximum_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::maximum"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "maximum.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean.h new file mode 100644 index 0000000000000000000000000000000000000000..1f9850d641f1ff1e554d2cf7a223b333f020d5df --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean.h @@ -0,0 +1,68 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor mean(const at::Tensor & self, ::std::optional<at::ScalarType> dtype=::std::nullopt) { + return at::_ops::mean::call(self, dtype); +} + +// aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::ScalarType> dtype=::std::nullopt) { + return at::_ops::mean_dtype_out::call(self, dtype, out); +} +// aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mean_outf(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::mean_dtype_out::call(self, dtype, out); +} + +// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) { + return at::_ops::mean_dim::call(self, dim, keepdim, dtype); +} + +// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) { + return at::_ops::mean_out::call(self, dim, keepdim, dtype, out); +} +// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::mean_out::call(self, dim, keepdim, dtype, out); +} + +// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor mean(const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) { + return at::_ops::mean_names_dim::call(self, dim, keepdim, dtype); +} + +// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) { + return at::_ops::mean_names_out::call(self, dim, keepdim, dtype, out); +} +// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mean_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::mean_names_out::call(self, dim, keepdim, dtype, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..229eca9154922bc64cc07676c35782f648ecc92e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor mean(const at::Tensor & self, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_outf(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3147afdb48cce3d5c3aed4f780273bcb3d3e11dd --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
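// [Editor's note, not part of the generated header: every out-variant in these headers
// comes in two spellings. mean_out takes the destination first and keeps the C++ default
// arguments; mean_outf follows the schema order, destination last, no defaults. Both
// resolve to the same operator. A sketch with a hypothetical tensor t:
//
//   at::Tensor t = at::rand({4, 5});
//   at::Tensor out = at::empty({}, t.options());
//   at::mean_out(out, t);                   // out-first, dtype defaulted
//   at::mean_outf(t, ::std::nullopt, out);  // schema order, all args explicit
// ]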
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5c03a11d38047acf325d18ae74d7890f70b0a9fe --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor mean(const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6fb64a26c6143a891e68601b01e652243864ddc7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
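// [Editor's note, not part of the generated header: dim in the reduction overloads is an
// at::OptionalIntArrayRef, so the mean can cover a subset of dimensions or, with
// ::std::nullopt, all of them. A sketch:
//
//   at::Tensor m  = at::rand({2, 3, 4});
//   at::Tensor r0 = at::mean(m, /*dim=*/{0, 2});                   // shape [3]
//   at::Tensor r1 = at::mean(m, /*dim=*/{0, 2}, /*keepdim=*/true); // shape [1, 3, 1]
// ]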
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..102bd60b4739956cf49c5d0b78b89f2002deb9ab --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..7fa559d6bb998ec35219988a9658388f6bcbd874 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_mean_dim : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype); +}; + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_meta_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1e4e7f57b5913a376d7fa89e447a7d22300fbe1c --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from
DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7ab672b6944a71c2e3b9c70c57f46d840f97cb95 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +TORCH_API at::Tensor mean(const at::Tensor & self, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_dtype_out(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out); +struct TORCH_API structured_mean_out : public at::meta::structured_mean_dim { +void impl(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, const at::Tensor & out); +}; +TORCH_API at::Tensor mean_quantized_cpu(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_out_quantized_cpu(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); +TORCH_API at::Tensor mean(const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & mean_out(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..68ad78610871a17a8c664823b3d13cb57612b1f7 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
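// [Editor's note, not part of the generated header: each struct in at::_ops below is the
// record for one operator overload: name plus overload_name identify it in the dispatcher
// registry, schema_str carries the full JIT schema, call() starts dispatch from scratch,
// and redispatch() resumes dispatch below the keys in the given DispatchKeySet (used by
// kernels and wrappers, not by ordinary callers). A sketch:
//
//   at::Tensor t = at::rand({3, 3});
//   // equivalent to at::mean(t, /*dim=*/{1}):
//   at::Tensor r = at::_ops::mean_dim::call(t, {1}, false, ::std::nullopt);
// ]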
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API mean { + using schema = at::Tensor (const at::Tensor &, ::std::optional<at::ScalarType>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::mean"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "mean(Tensor self, *, ScalarType? dtype=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype); +}; + +struct TORCH_API mean_dtype_out { + using schema = at::Tensor & (const at::Tensor &, ::std::optional<at::ScalarType>, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::mean"; + static constexpr const char* overload_name = "dtype_out"; + static constexpr const char* schema_str = "mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out); +}; + +struct TORCH_API mean_dim { + using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional<at::ScalarType>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::mean"; + static constexpr const char* overload_name = "dim"; + static constexpr const char* schema_str = "mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype); +}; + +struct TORCH_API mean_out { + using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional<at::ScalarType>, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::mean"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); +}; + +struct TORCH_API mean_names_dim { + using schema = at::Tensor (const at::Tensor &, at::DimnameList, bool, ::std::optional<at::ScalarType>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::mean"; + static constexpr const char* overload_name = "names_dim"; + static constexpr const char* schema_str = "mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType?
dtype=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional dtype); +}; + +struct TORCH_API mean_names_out { + using schema = at::Tensor & (const at::Tensor &, at::DimnameList, bool, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::mean"; + static constexpr const char* overload_name = "names_out"; + static constexpr const char* schema_str = "mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional dtype, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median.h new file mode 100644 index 0000000000000000000000000000000000000000..97da363c4e94b7aad40f9d283c9da00cd32239e4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median.h @@ -0,0 +1,68 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::median(Tensor self) -> Tensor +inline at::Tensor median(const at::Tensor & self) { + return at::_ops::median::call(self); +} + +// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple median(const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::median_dim::call(self, dim, keepdim); +} + +// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::median_dim_values::call(self, dim, keepdim, values, indices); +} +// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple median_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::median_dim_values::call(self, dim, keepdim, values, indices); +} + +// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple median(const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::median_names_dim::call(self, dim, keepdim); +} + +// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::median_names_dim_values::call(self, dim, keepdim, values, indices); +} +// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> median_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::median_names_dim_values::call(self, dim, keepdim, values, indices); +} + +// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & median_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::median_out::call(self, out); +} +// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & median_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::median_out::call(self, out); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_compositeexplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..062ae20866c71d27358f8fc6acc46e7a1098d616 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & median_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & median_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, int64_t dim, bool keepdim=false); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_compositeimplicitautograd_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5e22f2ee64d953a68b442a3fc2129edad574f5ea --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
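// [Editor's note, not part of the generated header: the dim-wise median overloads return
// a (values, indices) pair, which unpacks cleanly with C++17 structured bindings. A
// sketch with a hypothetical tensor t:
//
//   at::Tensor t = at::rand({4, 6});
//   auto [values, indices] = at::median(t, /*dim=*/1);
//   // values: shape [4]; indices: position of each median along dim 1
// ]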
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_cpu_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5c62be4ea2ba60569942e7a2fda138ef680f4063 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor median(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7c13cebb8b4c922364986f7bdf12f4c70561c319 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor median(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_native.h new file mode 100644 index 0000000000000000000000000000000000000000..42283961e1f402853194c51b0d118f39bc69fae0 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & median_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor median_cpu(const at::Tensor & self); +TORCH_API at::Tensor median_cuda(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_out_cpu(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_out_cuda(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..068bd4711a11648b6a950aa6f0f849f610b4418f --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/median_ops.h @@ -0,0 +1,84 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API median { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::median"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "median(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API median_dim { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::median"; + static constexpr const char* overload_name = "dim"; + static constexpr const char* schema_str = "median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, int64_t dim, bool keepdim); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim); +}; + +struct TORCH_API median_dim_values { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::median"; + static constexpr const char* overload_name = "dim_values"; + static constexpr const char* schema_str = "median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"; + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +}; + +struct TORCH_API median_names_dim { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::Dimname, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::median"; + static constexpr const char* overload_name = "names_dim"; + static constexpr const char* schema_str = "median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)"; + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::Dimname dim, bool keepdim); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim); +}; + +struct TORCH_API median_names_dim_values { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Dimname, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::median"; + static constexpr const char* overload_name = "names_dim_values"; + static constexpr const char* schema_str = "median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!)
indices)"; + static ::std::tuple call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +}; + +struct TORCH_API median_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::median"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid.h new file mode 100644 index 0000000000000000000000000000000000000000..00921d66bfc058d8eda7a10a58df478f43f6e5b4 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid.h @@ -0,0 +1,36 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::meshgrid(Tensor[] tensors) -> Tensor[] +inline ::std::vector meshgrid(at::TensorList tensors) { + return at::_ops::meshgrid::call(tensors); +} + +// aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] +inline ::std::vector meshgrid(at::TensorList tensors, c10::string_view indexing) { + return at::_ops::meshgrid_indexing::call(tensors, indexing); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_native.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a8410c7bf6a3c44e60b8958c410d5f161465aceb --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::vector meshgrid(at::TensorList tensors); +TORCH_API ::std::vector meshgrid(at::TensorList tensors, c10::string_view indexing); +} // namespace native +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_ops.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f02bf5542078577c6347a4dc46ec1255be4aa244 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_ops.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API meshgrid { + using schema = ::std::vector<at::Tensor> (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::meshgrid"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "meshgrid(Tensor[] tensors) -> Tensor[]"; + static ::std::vector<at::Tensor> call(at::TensorList tensors); + static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors); +}; + +struct TORCH_API meshgrid_indexing { + using schema = ::std::vector<at::Tensor> (at::TensorList, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::meshgrid"; + static constexpr const char* overload_name = "indexing"; + static constexpr const char* schema_str = "meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]"; + static ::std::vector<at::Tensor> call(at::TensorList tensors, c10::string_view indexing); + static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, c10::string_view indexing); +}; + +}} // namespace at::_ops diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min.h new file mode 100644 index 0000000000000000000000000000000000000000..45078e841154f00f07a3b527309538f967659d6e --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min.h @@ -0,0 +1,82 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::min_dim::call(self, dim, keepdim); +} + +// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::min_dim_min::call(self, dim, keepdim, min, min_indices); +} +// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> min_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) { + return at::_ops::min_dim_min::call(self, dim, keepdim, min, min_indices); +} + +// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::min_names_dim::call(self, dim, keepdim); +} + +// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!)
indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::min_names_dim_min::call(self, dim, keepdim, min, min_indices); +} +// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple<at::Tensor &,at::Tensor &> min_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) { + return at::_ops::min_names_dim_min::call(self, dim, keepdim, min, min_indices); +} + +// aten::min(Tensor self) -> Tensor +inline at::Tensor min(const at::Tensor & self) { + return at::_ops::min::call(self); +} + +// aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & min_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::min_unary_out::call(self, out); +} +// aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & min_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::min_unary_out::call(self, out); +} + +// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & min_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::min_out::call(self, other, out); +} +// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & min_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::min_out::call(self, other, out); +} + +// aten::min.other(Tensor self, Tensor other) -> Tensor +inline at::Tensor min(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::min_other::call(self, other); +} + +} diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_compositeexplicitautogradnonfunctional_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a7d972ba66aab4338481312c15d58c47860c6a28 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_cuda_dispatch.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..13032f04eaee9a0c35286f9e192437ebea2c9cbc --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> min_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices); +TORCH_API at::Tensor min(const at::Tensor & self); +TORCH_API at::Tensor & min_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & min_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_meta.h b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..bde14060a47c5fffb43d848bd182d2706cbada63 --- /dev/null +++ b/Scripts_Climate_n_LAI_to_Yield/.venv/lib/python3.10/site-packages/torch/include/ATen/ops/min_meta.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_min_dim : public at::impl::MetaBase { + + template <bool DIM = false> + struct TORCH_API precompute_out { + + precompute_out<true> set_dim(int64_t value) { + static_assert(DIM == false, "dim already set"); + precompute_out<true> ret; +ret.dim = value; +return ret; + } + + int64_t dim; + }; + using meta_return_ty = precompute_out <true>; + meta_return_ty meta(const at::Tensor & self, int64_t dim, bool keepdim); +}; + +} // namespace meta +} // namespace at
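// [Editor's note, not part of the generated header: structured_min_dim::precompute_out is
// a builder that the meta() function uses to hand a canonicalized dim to the kernel; the
// template <bool DIM> flag turns setting dim twice into a compile-time error via the
// static_assert above. A sketch of how a meta() implementation might use it, with
// maybe_wrap_dim as the usual ATen dim canonicalization:
//
//   // return precompute_out<false>().set_dim(at::maybe_wrap_dim(dim, self.dim()));
// ]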