diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..b03dcb3c40524c68d320ae5618ce4ba5a989fcd1
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+*.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cu128-x86_64-windows/_deformable_detr_cuda_d8a6191.pyd filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..de22c853b79d3ce8045c74d7fad800a5a7e4f958
--- /dev/null
+++ b/README.md
@@ -0,0 +1,22 @@
+---
+license: apache-2.0
+tags:
+ - kernels
+---
+
+
+
+## deformable-detr
+
+Kernel source: https://github.com/huggingface/kernels-community/tree/main/deformable-detr
+
+### Performance
+
+
+
+
+
+
+
+
+
diff --git a/benchmarks/benchmark.py b/benchmarks/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1fdbecaf7a5dbdaa4cf316e3a72778fc9da5589
--- /dev/null
+++ b/benchmarks/benchmark.py
@@ -0,0 +1,250 @@
+import torch
+import torch.nn.functional as F
+
+from kernels.benchmark import Benchmark
+
+
+def ms_deform_attn_reference(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_locations: torch.Tensor,
+ attention_weights: torch.Tensor,
+) -> torch.Tensor:
+ batch, _, num_heads, channels = value.shape
+ _, num_query, _, num_levels, num_points, _ = sampling_locations.shape
+
+ # Split value by levels
+ value_list = []
+ for level_id in range(num_levels):
+ H, W = spatial_shapes[level_id]
+ start_idx = level_start_index[level_id]
+ end_idx = (
+ level_start_index[level_id + 1]
+ if level_id < num_levels - 1
+ else value.shape[1]
+ )
+ # (batch, H*W, num_heads, channels) -> (batch, num_heads, channels, H, W)
+ value_level = value[:, start_idx:end_idx, :, :].view(
+ batch, H, W, num_heads, channels
+ )
+ value_level = value_level.permute(0, 3, 4, 1, 2).contiguous()
+ value_list.append(value_level)
+
+ # Sample from each level
+ output = torch.zeros(
+ batch, num_query, num_heads, channels, device=value.device, dtype=value.dtype
+ )
+
+ for level_id in range(num_levels):
+ H, W = spatial_shapes[level_id]
+ value_level = value_list[level_id] # (batch, num_heads, channels, H, W)
+
+ # Get sampling locations for this level: (batch, num_query, num_heads, num_points, 2)
+ sampling_loc_level = sampling_locations[:, :, :, level_id, :, :]
+
+ # Convert from [0, 1] to [-1, 1] for grid_sample
+ grid = (
+ 2.0 * sampling_loc_level - 1.0
+ ) # (batch, num_query, num_heads, num_points, 2)
+
+ # Reshape for grid_sample: need (batch * num_heads, channels, H, W) and (batch * num_heads, num_query, num_points, 2)
+ value_level = value_level.view(batch * num_heads, channels, H.item(), W.item())
+ grid = grid.permute(
+ 0, 2, 1, 3, 4
+ ).contiguous() # (batch, num_heads, num_query, num_points, 2)
+ grid = grid.view(batch * num_heads, num_query, num_points, 2)
+
+ # Sample: output is (batch * num_heads, channels, num_query, num_points)
+ sampled = F.grid_sample(
+ value_level,
+ grid,
+ mode="bilinear",
+ padding_mode="zeros",
+ align_corners=False,
+ )
+
+ # Reshape back: (batch, num_heads, channels, num_query, num_points)
+ sampled = sampled.view(batch, num_heads, channels, num_query, num_points)
+ # -> (batch, num_query, num_heads, num_points, channels)
+ sampled = sampled.permute(0, 3, 1, 4, 2).contiguous()
+
+ # Get attention weights for this level: (batch, num_query, num_heads, num_points)
+ attn_level = attention_weights[:, :, :, level_id, :]
+
+ # Weighted sum over points: (batch, num_query, num_heads, channels)
+ output += (sampled * attn_level.unsqueeze(-1)).sum(dim=3)
+
+ # Reshape to (batch, num_query, num_heads * channels)
+ output = output.view(batch, num_query, num_heads * channels)
+ return output
+
+
+class MSDeformAttnBenchmark(Benchmark):
+ seed: int = 42
+
+ def setup(self):
+ batch = 2
+ num_heads = 8
+ channels = 32 # embed_dim = num_heads * channels = 256
+ num_levels = 4
+ num_query = 300
+ num_points = 4
+ im2col_step = 64
+
+ # Spatial shapes for 4 levels: 64x64, 32x32, 16x16, 8x8
+ spatial_shapes = torch.tensor(
+ [[64, 64], [32, 32], [16, 16], [8, 8]],
+ dtype=torch.int64,
+ device=self.device,
+ )
+ # Calculate spatial_size = sum of H*W for all levels
+ spatial_size = (64 * 64) + (32 * 32) + (16 * 16) + (8 * 8) # 5440
+
+ # Level start indices
+ level_start_index = torch.tensor(
+ [0, 64 * 64, 64 * 64 + 32 * 32, 64 * 64 + 32 * 32 + 16 * 16],
+ dtype=torch.int64,
+ device=self.device,
+ )
+
+ self.value = torch.randn(
+ batch,
+ spatial_size,
+ num_heads,
+ channels,
+ device=self.device,
+ dtype=torch.float32,
+ )
+ self.spatial_shapes = spatial_shapes
+ self.level_start_index = level_start_index
+ self.sampling_loc = torch.rand(
+ batch,
+ num_query,
+ num_heads,
+ num_levels,
+ num_points,
+ 2,
+ device=self.device,
+ dtype=torch.float32,
+ )
+ self.attn_weight = torch.rand(
+ batch,
+ num_query,
+ num_heads,
+ num_levels,
+ num_points,
+ device=self.device,
+ dtype=torch.float32,
+ )
+ # Normalize attention weights
+ self.attn_weight = self.attn_weight / self.attn_weight.sum(-1, keepdim=True)
+ self.im2col_step = im2col_step
+
+ self.out = torch.empty(
+ batch,
+ num_query,
+ num_heads * channels,
+ device=self.device,
+ dtype=torch.float32,
+ )
+
+ def benchmark_forward(self):
+ self.out = self.kernel.ms_deform_attn_forward(
+ self.value,
+ self.spatial_shapes,
+ self.level_start_index,
+ self.sampling_loc,
+ self.attn_weight,
+ self.im2col_step,
+ )
+
+ def verify_forward(self) -> torch.Tensor:
+ return ms_deform_attn_reference(
+ self.value,
+ self.spatial_shapes,
+ self.level_start_index,
+ self.sampling_loc,
+ self.attn_weight,
+ )
+
+ def setup_large(self):
+ batch = 8
+ num_heads = 8
+ channels = 32
+ num_levels = 4
+ num_query = 900
+ num_points = 4
+ im2col_step = 64
+
+ spatial_shapes = torch.tensor(
+ [[64, 64], [32, 32], [16, 16], [8, 8]],
+ dtype=torch.int64,
+ device=self.device,
+ )
+ spatial_size = (64 * 64) + (32 * 32) + (16 * 16) + (8 * 8)
+
+ level_start_index = torch.tensor(
+ [0, 64 * 64, 64 * 64 + 32 * 32, 64 * 64 + 32 * 32 + 16 * 16],
+ dtype=torch.int64,
+ device=self.device,
+ )
+
+ self.value = torch.randn(
+ batch,
+ spatial_size,
+ num_heads,
+ channels,
+ device=self.device,
+ dtype=torch.float32,
+ )
+ self.spatial_shapes = spatial_shapes
+ self.level_start_index = level_start_index
+ self.sampling_loc = torch.rand(
+ batch,
+ num_query,
+ num_heads,
+ num_levels,
+ num_points,
+ 2,
+ device=self.device,
+ dtype=torch.float32,
+ )
+ self.attn_weight = torch.rand(
+ batch,
+ num_query,
+ num_heads,
+ num_levels,
+ num_points,
+ device=self.device,
+ dtype=torch.float32,
+ )
+ self.attn_weight = self.attn_weight / self.attn_weight.sum(-1, keepdim=True)
+ self.im2col_step = im2col_step
+
+ self.out = torch.empty(
+ batch,
+ num_query,
+ num_heads * channels,
+ device=self.device,
+ dtype=torch.float32,
+ )
+
+ def benchmark_large(self):
+ self.out = self.kernel.ms_deform_attn_forward(
+ self.value,
+ self.spatial_shapes,
+ self.level_start_index,
+ self.sampling_loc,
+ self.attn_weight,
+ self.im2col_step,
+ )
+
+ def verify_large(self) -> torch.Tensor:
+ return ms_deform_attn_reference(
+ self.value,
+ self.spatial_shapes,
+ self.level_start_index,
+ self.sampling_loc,
+ self.attn_weight,
+ )
diff --git a/build/torch210-cu128-x86_64-windows/__init__.py b/build/torch210-cu128-x86_64-windows/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7082e931a91c42eef91df9cfac9d908b919a0443
--- /dev/null
+++ b/build/torch210-cu128-x86_64-windows/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch210-cu128-x86_64-windows/_deformable_detr_cuda_d8a6191.pyd b/build/torch210-cu128-x86_64-windows/_deformable_detr_cuda_d8a6191.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..e190d75a9838ac1220cfa79b38eddf4794533c64
--- /dev/null
+++ b/build/torch210-cu128-x86_64-windows/_deformable_detr_cuda_d8a6191.pyd
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f94825a148a77c630ae4f24f75f65e96dd6e3379653643aa2e81420ba61a9db3
+size 9546240
diff --git a/build/torch210-cu128-x86_64-windows/_ops.py b/build/torch210-cu128-x86_64-windows/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4d286912955f247852eaaf90a1f579c28c2079f
--- /dev/null
+++ b/build/torch210-cu128-x86_64-windows/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_d8a6191
+ops = torch.ops._deformable_detr_cuda_d8a6191
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_cuda_d8a6191::{op_name}"
diff --git a/build/torch210-cu128-x86_64-windows/deformable_detr/__init__.py b/build/torch210-cu128-x86_64-windows/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc434ef44e63409acb52a8f3fff54a4adc46ed6a
--- /dev/null
+++ b/build/torch210-cu128-x86_64-windows/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib.util
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cu128-x86_64-windows/layers.py b/build/torch210-cu128-x86_64-windows/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e682577b15f8985c02cb017a7a1f501f9d912aa
--- /dev/null
+++ b/build/torch210-cu128-x86_64-windows/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch210-cu128-x86_64-windows/metadata.json b/build/torch210-cu128-x86_64-windows/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..7ddf5ff75a35f315c1398fff49390f17fd4e0ee9
--- /dev/null
+++ b/build/torch210-cu128-x86_64-windows/metadata.json
@@ -0,0 +1,21 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "10.1",
+ "12.0+PTX",
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch210-cxx11-cu126-aarch64-linux/__init__.py b/build/torch210-cxx11-cu126-aarch64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch210-cxx11-cu126-aarch64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch210-cxx11-cu126-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch210-cxx11-cu126-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..832afe3dbb97a2295e144ce168d7a5c67a230aed
--- /dev/null
+++ b/build/torch210-cxx11-cu126-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:521f9ff226174c047c6d384b8e39a92a427c06496ad87465c554c2b51239e317
+size 8606368
diff --git a/build/torch210-cxx11-cu126-aarch64-linux/_ops.py b/build/torch210-cxx11-cu126-aarch64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch210-cxx11-cu126-aarch64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
+ops = torch.ops._deformable_detr_cuda_52e302f
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_cuda_52e302f::{op_name}"
diff --git a/build/torch210-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py b/build/torch210-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch210-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu126-aarch64-linux/layers.py b/build/torch210-cxx11-cu126-aarch64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch210-cxx11-cu126-aarch64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch210-cxx11-cu126-aarch64-linux/metadata.json b/build/torch210-cxx11-cu126-aarch64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..f5902b55ab0b2b561c0cf97567c9806c60839c7f
--- /dev/null
+++ b/build/torch210-cxx11-cu126-aarch64-linux/metadata.json
@@ -0,0 +1,18 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0+PTX"
+ ]
+ }
+}
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/__init__.py b/build/torch210-cxx11-cu126-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch210-cxx11-cu126-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch210-cxx11-cu126-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..9608a5bb230a746f8b345729180497d9b3ac8175
--- /dev/null
+++ b/build/torch210-cxx11-cu126-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddc6b5a40afe614d97965a4f35113df59852130e21699792bb883dcdd8b1228f
+size 8541080
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/_ops.py b/build/torch210-cxx11-cu126-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch210-cxx11-cu126-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
+ops = torch.ops._deformable_detr_cuda_52e302f
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_cuda_52e302f::{op_name}"
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py b/build/torch210-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch210-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/layers.py b/build/torch210-cxx11-cu126-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch210-cxx11-cu126-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/metadata.json b/build/torch210-cxx11-cu126-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..f5902b55ab0b2b561c0cf97567c9806c60839c7f
--- /dev/null
+++ b/build/torch210-cxx11-cu126-x86_64-linux/metadata.json
@@ -0,0 +1,18 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0+PTX"
+ ]
+ }
+}
diff --git a/build/torch210-cxx11-cu128-aarch64-linux/__init__.py b/build/torch210-cxx11-cu128-aarch64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch210-cxx11-cu128-aarch64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch210-cxx11-cu128-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch210-cxx11-cu128-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..28bd64f0100dfcacad20a6f84faf8bd94bd14f21
--- /dev/null
+++ b/build/torch210-cxx11-cu128-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6de7e1d1805eb6d51f7588654319610b0a8fb55755ab5b6f2f2c42792ba060a9
+size 11621120
diff --git a/build/torch210-cxx11-cu128-aarch64-linux/_ops.py b/build/torch210-cxx11-cu128-aarch64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch210-cxx11-cu128-aarch64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
+ops = torch.ops._deformable_detr_cuda_52e302f
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_cuda_52e302f::{op_name}"
diff --git a/build/torch210-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py b/build/torch210-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch210-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu128-aarch64-linux/layers.py b/build/torch210-cxx11-cu128-aarch64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch210-cxx11-cu128-aarch64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch210-cxx11-cu128-aarch64-linux/metadata.json b/build/torch210-cxx11-cu128-aarch64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b796af185fbbd8594fcd846949aa5fadc0ccdda
--- /dev/null
+++ b/build/torch210-cxx11-cu128-aarch64-linux/metadata.json
@@ -0,0 +1,21 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "10.1",
+ "12.0+PTX",
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/__init__.py b/build/torch210-cxx11-cu128-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch210-cxx11-cu128-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Backward pass of multi-scale deformable attention.

    Pure pass-through to the compiled ``ms_deform_attn_backward`` op; the
    positional argument order here is the C++ op's calling convention — do
    not reorder.

    Returns ``[grad_value, grad_sampling_loc, grad_attn_weight]`` — the
    gradients w.r.t. ``value``, ``sampling_loc`` and ``attn_weight``.
    """
    return ops.ms_deform_attn_backward(
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        grad_output,
        im2col_step,
    )
+
+
def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Forward pass of multi-scale deformable attention.

    Pure pass-through to the compiled ``ms_deform_attn_forward`` op; the
    positional argument order is the C++ op's calling convention — do not
    reorder. Returns the attention output tensor.
    """
    return ops.ms_deform_attn_forward(
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        im2col_step,
    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch210-cxx11-cu128-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..e0837aef438976dc64201466531fa22d2e1d47a5
--- /dev/null
+++ b/build/torch210-cxx11-cu128-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c23af683cd86bb8f8a4426617c869403ea4fdba88b78935c676705e227523716
+size 11524560
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/_ops.py b/build/torch210-cxx11-cu128-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch210-cxx11-cu128-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
+ops = torch.ops._deformable_detr_cuda_52e302f
+
def add_op_namespace_prefix(op_name: str):
    """Qualify *op_name* with this extension's torch op namespace."""
    namespace = "_deformable_detr_cuda_52e302f"
    return namespace + "::" + op_name
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py b/build/torch210-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch210-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
def _import_from_path(file_path: Path) -> ModuleType:
    """Load the Python module at *file_path* under a path-derived unique name.

    Registering the module under its real name in ``sys.modules`` would
    hijack later imports of that name, so a per-path name is synthesized
    instead: the unsigned hash of the absolute path, hex-encoded.
    """
    unsigned_hash = ctypes.c_size_t(hash(file_path.absolute())).value
    module_name = f"{unsigned_hash:x}"
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None:
        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
    module = importlib.util.module_from_spec(spec)
    if module is None:
        raise ImportError(f"Cannot load module {module_name} from spec")
    sys.modules[module_name] = module
    spec.loader.exec_module(module)  # type: ignore
    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/layers.py b/build/torch210-cxx11-cu128-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch210-cxx11-cu128-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/metadata.json b/build/torch210-cxx11-cu128-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b796af185fbbd8594fcd846949aa5fadc0ccdda
--- /dev/null
+++ b/build/torch210-cxx11-cu128-x86_64-linux/metadata.json
@@ -0,0 +1,21 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "10.1",
+ "12.0+PTX",
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch210-cxx11-cu130-aarch64-linux/__init__.py b/build/torch210-cxx11-cu130-aarch64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch210-cxx11-cu130-aarch64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch210-cxx11-cu130-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch210-cxx11-cu130-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..ea9a5199f4914a2807d85ef47b7050c40c19ab62
--- /dev/null
+++ b/build/torch210-cxx11-cu130-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8bd9d5c6f5fdc673181ca67ec66a6f16d9c77a13fef13628b197aaa6d0ed98f
+size 9890792
diff --git a/build/torch210-cxx11-cu130-aarch64-linux/_ops.py b/build/torch210-cxx11-cu130-aarch64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch210-cxx11-cu130-aarch64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
+ops = torch.ops._deformable_detr_cuda_52e302f
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_cuda_52e302f::{op_name}"
diff --git a/build/torch210-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py b/build/torch210-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch210-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu130-aarch64-linux/layers.py b/build/torch210-cxx11-cu130-aarch64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch210-cxx11-cu130-aarch64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch210-cxx11-cu130-aarch64-linux/metadata.json b/build/torch210-cxx11-cu130-aarch64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..66651b7d3f95ac9e5ce5fc2a641b6f0f50788f87
--- /dev/null
+++ b/build/torch210-cxx11-cu130-aarch64-linux/metadata.json
@@ -0,0 +1,19 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "11.0",
+ "12.0+PTX",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/__init__.py b/build/torch210-cxx11-cu130-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch210-cxx11-cu130-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch210-cxx11-cu130-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..72116a77b08a82caf6f3a2463cb24ece6d7e6d0d
--- /dev/null
+++ b/build/torch210-cxx11-cu130-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c26502008f0002400df7ed708d8014210ad43655701340ce9089bc342bf9e4b
+size 9808368
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/_ops.py b/build/torch210-cxx11-cu130-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch210-cxx11-cu130-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
+ops = torch.ops._deformable_detr_cuda_52e302f
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_cuda_52e302f::{op_name}"
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/deformable_detr/__init__.py b/build/torch210-cxx11-cu130-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch210-cxx11-cu130-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/layers.py b/build/torch210-cxx11-cu130-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch210-cxx11-cu130-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/metadata.json b/build/torch210-cxx11-cu130-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..66651b7d3f95ac9e5ce5fc2a641b6f0f50788f87
--- /dev/null
+++ b/build/torch210-cxx11-cu130-x86_64-linux/metadata.json
@@ -0,0 +1,19 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "11.0",
+ "12.0+PTX",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch211-cxx11-cu126-aarch64-linux/__init__.py b/build/torch211-cxx11-cu126-aarch64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch211-cxx11-cu126-aarch64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch211-cxx11-cu126-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch211-cxx11-cu126-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..d9acec40b9800851583896d49d4e1f360da67381
--- /dev/null
+++ b/build/torch211-cxx11-cu126-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:422cd51f413bc85e60b606ab8ead8850331a3b5e288d8a0e4923b10a68c0f4e5
+size 8606480
diff --git a/build/torch211-cxx11-cu126-aarch64-linux/_ops.py b/build/torch211-cxx11-cu126-aarch64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch211-cxx11-cu126-aarch64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
+ops = torch.ops._deformable_detr_cuda_52e302f
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_cuda_52e302f::{op_name}"
diff --git a/build/torch211-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py b/build/torch211-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch211-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch211-cxx11-cu126-aarch64-linux/layers.py b/build/torch211-cxx11-cu126-aarch64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch211-cxx11-cu126-aarch64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch211-cxx11-cu126-aarch64-linux/metadata.json b/build/torch211-cxx11-cu126-aarch64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..f5902b55ab0b2b561c0cf97567c9806c60839c7f
--- /dev/null
+++ b/build/torch211-cxx11-cu126-aarch64-linux/metadata.json
@@ -0,0 +1,18 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0+PTX"
+ ]
+ }
+}
diff --git a/build/torch211-cxx11-cu126-x86_64-linux/__init__.py b/build/torch211-cxx11-cu126-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch211-cxx11-cu126-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch211-cxx11-cu126-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch211-cxx11-cu126-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..726388a94fbddff89fc093ea63f824c1ed67865d
--- /dev/null
+++ b/build/torch211-cxx11-cu126-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:394d93e2b5c6caf4fe6eb6df79ac20f2e85d0ca1cec7287991d05dee7541c63e
+size 8541176
diff --git a/build/torch211-cxx11-cu126-x86_64-linux/_ops.py b/build/torch211-cxx11-cu126-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch211-cxx11-cu126-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
+ops = torch.ops._deformable_detr_cuda_52e302f
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_cuda_52e302f::{op_name}"
diff --git a/build/torch211-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py b/build/torch211-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch211-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch211-cxx11-cu126-x86_64-linux/layers.py b/build/torch211-cxx11-cu126-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch211-cxx11-cu126-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch211-cxx11-cu126-x86_64-linux/metadata.json b/build/torch211-cxx11-cu126-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..f5902b55ab0b2b561c0cf97567c9806c60839c7f
--- /dev/null
+++ b/build/torch211-cxx11-cu126-x86_64-linux/metadata.json
@@ -0,0 +1,18 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0+PTX"
+ ]
+ }
+}
diff --git a/build/torch211-cxx11-cu128-aarch64-linux/__init__.py b/build/torch211-cxx11-cu128-aarch64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch211-cxx11-cu128-aarch64-linux/__init__.py
@@ -0,0 +1,46 @@
from typing import List
import torch

from ._ops import ops
from . import layers


def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Run the compiled multi-scale deformable attention forward kernel.

    Thin pass-through to the registered ``ms_deform_attn_forward`` op; see
    the kernel for argument semantics.
    """
    return ops.ms_deform_attn_forward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, im2col_step,
    )


def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Run the compiled multi-scale deformable attention backward kernel.

    Thin pass-through to the registered ``ms_deform_attn_backward`` op.
    """
    return ops.ms_deform_attn_backward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, grad_output, im2col_step,
    )


__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch211-cxx11-cu128-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch211-cxx11-cu128-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..a5ccb3843c84033cb48492465a41419d26e8bc96
--- /dev/null
+++ b/build/torch211-cxx11-cu128-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dbae2fe41f5527c079dbeee093a6a66d3355834555cfd9a8d23ebcf4ae3576c
+size 11621232
diff --git a/build/torch211-cxx11-cu128-aarch64-linux/_ops.py b/build/torch211-cxx11-cu128-aarch64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch211-cxx11-cu128-aarch64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
# Bind the compiled extension's op namespace once at import time.
ops = torch.ops._deformable_detr_cuda_52e302f


def add_op_namespace_prefix(op_name: str) -> str:
    """Return *op_name* qualified with this build's op namespace."""
    return "::".join(("_deformable_detr_cuda_52e302f", op_name))
diff --git a/build/torch211-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py b/build/torch211-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch211-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch211-cxx11-cu128-aarch64-linux/layers.py b/build/torch211-cxx11-cu128-aarch64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch211-cxx11-cu128-aarch64-linux/layers.py
@@ -0,0 +1,84 @@
from typing import List, Union, Tuple

from torch import Tensor
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import torch.nn as nn

from ._ops import ops


class MultiScaleDeformableAttentionFunction(Function):
    """Autograd bridge to the compiled multi-scale deformable attention op."""

    @staticmethod
    def forward(
        context,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        """Launch the fused forward kernel and stash what backward() needs."""
        context.im2col_step = im2col_step
        result = ops.ms_deform_attn_forward(
            value, value_spatial_shapes, value_level_start_index,
            sampling_locations, attention_weights, context.im2col_step,
        )
        context.save_for_backward(
            value, value_spatial_shapes, value_level_start_index,
            sampling_locations, attention_weights,
        )
        return result

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        """Launch the fused backward kernel on the saved forward inputs."""
        saved = context.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved, grad_output, context.im2col_step
        )
        # One return slot per forward() argument: the shape/index tensors and
        # the integer im2col_step receive no gradient.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None


class MultiScaleDeformableAttention(nn.Module):
    """nn.Module wrapper that dispatches to the custom autograd function."""

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # NOTE(review): value_spatial_shapes_list is accepted but unused here;
        # presumably kept so the signature matches callers that also pass the
        # Python-list form of the shapes — confirm against call sites.
        return MultiScaleDeformableAttentionFunction.apply(
            value, value_spatial_shapes, level_start_index,
            sampling_locations, attention_weights, im2col_step,
        )


__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch211-cxx11-cu128-aarch64-linux/metadata.json b/build/torch211-cxx11-cu128-aarch64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b796af185fbbd8594fcd846949aa5fadc0ccdda
--- /dev/null
+++ b/build/torch211-cxx11-cu128-aarch64-linux/metadata.json
@@ -0,0 +1,21 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "10.1",
+ "12.0+PTX",
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch211-cxx11-cu128-x86_64-linux/__init__.py b/build/torch211-cxx11-cu128-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch211-cxx11-cu128-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
from typing import List
import torch

from ._ops import ops
from . import layers


def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Run the compiled multi-scale deformable attention forward kernel.

    Thin pass-through to the registered ``ms_deform_attn_forward`` op; see
    the kernel for argument semantics.
    """
    return ops.ms_deform_attn_forward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, im2col_step,
    )


def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Run the compiled multi-scale deformable attention backward kernel.

    Thin pass-through to the registered ``ms_deform_attn_backward`` op.
    """
    return ops.ms_deform_attn_backward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, grad_output, im2col_step,
    )


__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch211-cxx11-cu128-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch211-cxx11-cu128-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..9ad265824be46f486b2ee5f138df2fb13dfc16bd
--- /dev/null
+++ b/build/torch211-cxx11-cu128-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b266559af6309a2afc7175eeee0f5cf01dd091b9fa035aaf13f813daeb6b6c56
+size 11528752
diff --git a/build/torch211-cxx11-cu128-x86_64-linux/_ops.py b/build/torch211-cxx11-cu128-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch211-cxx11-cu128-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
# Bind the compiled extension's op namespace once at import time.
ops = torch.ops._deformable_detr_cuda_52e302f


def add_op_namespace_prefix(op_name: str) -> str:
    """Return *op_name* qualified with this build's op namespace."""
    return "::".join(("_deformable_detr_cuda_52e302f", op_name))
diff --git a/build/torch211-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py b/build/torch211-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch211-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch211-cxx11-cu128-x86_64-linux/layers.py b/build/torch211-cxx11-cu128-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch211-cxx11-cu128-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
from typing import List, Union, Tuple

from torch import Tensor
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import torch.nn as nn

from ._ops import ops


class MultiScaleDeformableAttentionFunction(Function):
    """Autograd bridge to the compiled multi-scale deformable attention op."""

    @staticmethod
    def forward(
        context,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        """Launch the fused forward kernel and stash what backward() needs."""
        context.im2col_step = im2col_step
        result = ops.ms_deform_attn_forward(
            value, value_spatial_shapes, value_level_start_index,
            sampling_locations, attention_weights, context.im2col_step,
        )
        context.save_for_backward(
            value, value_spatial_shapes, value_level_start_index,
            sampling_locations, attention_weights,
        )
        return result

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        """Launch the fused backward kernel on the saved forward inputs."""
        saved = context.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved, grad_output, context.im2col_step
        )
        # One return slot per forward() argument: the shape/index tensors and
        # the integer im2col_step receive no gradient.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None


class MultiScaleDeformableAttention(nn.Module):
    """nn.Module wrapper that dispatches to the custom autograd function."""

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # NOTE(review): value_spatial_shapes_list is accepted but unused here;
        # presumably kept so the signature matches callers that also pass the
        # Python-list form of the shapes — confirm against call sites.
        return MultiScaleDeformableAttentionFunction.apply(
            value, value_spatial_shapes, level_start_index,
            sampling_locations, attention_weights, im2col_step,
        )


__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch211-cxx11-cu128-x86_64-linux/metadata.json b/build/torch211-cxx11-cu128-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b796af185fbbd8594fcd846949aa5fadc0ccdda
--- /dev/null
+++ b/build/torch211-cxx11-cu128-x86_64-linux/metadata.json
@@ -0,0 +1,21 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "10.1",
+ "12.0+PTX",
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch211-cxx11-cu130-aarch64-linux/__init__.py b/build/torch211-cxx11-cu130-aarch64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch211-cxx11-cu130-aarch64-linux/__init__.py
@@ -0,0 +1,46 @@
from typing import List
import torch

from ._ops import ops
from . import layers


def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Run the compiled multi-scale deformable attention forward kernel.

    Thin pass-through to the registered ``ms_deform_attn_forward`` op; see
    the kernel for argument semantics.
    """
    return ops.ms_deform_attn_forward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, im2col_step,
    )


def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Run the compiled multi-scale deformable attention backward kernel.

    Thin pass-through to the registered ``ms_deform_attn_backward`` op.
    """
    return ops.ms_deform_attn_backward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, grad_output, im2col_step,
    )


__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch211-cxx11-cu130-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch211-cxx11-cu130-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..48adbc5c1b67101b23753ab114fdb6d9396fec0c
--- /dev/null
+++ b/build/torch211-cxx11-cu130-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d10d14abde7394a1447542f3c8084161c7578c18f0218fda307fb805849b4f1
+size 9890904
diff --git a/build/torch211-cxx11-cu130-aarch64-linux/_ops.py b/build/torch211-cxx11-cu130-aarch64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch211-cxx11-cu130-aarch64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
# Bind the compiled extension's op namespace once at import time.
ops = torch.ops._deformable_detr_cuda_52e302f


def add_op_namespace_prefix(op_name: str) -> str:
    """Return *op_name* qualified with this build's op namespace."""
    return "::".join(("_deformable_detr_cuda_52e302f", op_name))
diff --git a/build/torch211-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py b/build/torch211-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch211-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch211-cxx11-cu130-aarch64-linux/layers.py b/build/torch211-cxx11-cu130-aarch64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch211-cxx11-cu130-aarch64-linux/layers.py
@@ -0,0 +1,84 @@
from typing import List, Union, Tuple

from torch import Tensor
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import torch.nn as nn

from ._ops import ops


class MultiScaleDeformableAttentionFunction(Function):
    """Autograd bridge to the compiled multi-scale deformable attention op."""

    @staticmethod
    def forward(
        context,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        """Launch the fused forward kernel and stash what backward() needs."""
        context.im2col_step = im2col_step
        result = ops.ms_deform_attn_forward(
            value, value_spatial_shapes, value_level_start_index,
            sampling_locations, attention_weights, context.im2col_step,
        )
        context.save_for_backward(
            value, value_spatial_shapes, value_level_start_index,
            sampling_locations, attention_weights,
        )
        return result

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        """Launch the fused backward kernel on the saved forward inputs."""
        saved = context.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved, grad_output, context.im2col_step
        )
        # One return slot per forward() argument: the shape/index tensors and
        # the integer im2col_step receive no gradient.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None


class MultiScaleDeformableAttention(nn.Module):
    """nn.Module wrapper that dispatches to the custom autograd function."""

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # NOTE(review): value_spatial_shapes_list is accepted but unused here;
        # presumably kept so the signature matches callers that also pass the
        # Python-list form of the shapes — confirm against call sites.
        return MultiScaleDeformableAttentionFunction.apply(
            value, value_spatial_shapes, level_start_index,
            sampling_locations, attention_weights, im2col_step,
        )


__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch211-cxx11-cu130-aarch64-linux/metadata.json b/build/torch211-cxx11-cu130-aarch64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..66651b7d3f95ac9e5ce5fc2a641b6f0f50788f87
--- /dev/null
+++ b/build/torch211-cxx11-cu130-aarch64-linux/metadata.json
@@ -0,0 +1,19 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "11.0",
+ "12.0+PTX",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch211-cxx11-cu130-x86_64-linux/__init__.py b/build/torch211-cxx11-cu130-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch211-cxx11-cu130-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
from typing import List
import torch

from ._ops import ops
from . import layers


def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Run the compiled multi-scale deformable attention forward kernel.

    Thin pass-through to the registered ``ms_deform_attn_forward`` op; see
    the kernel for argument semantics.
    """
    return ops.ms_deform_attn_forward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, im2col_step,
    )


def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Run the compiled multi-scale deformable attention backward kernel.

    Thin pass-through to the registered ``ms_deform_attn_backward`` op.
    """
    return ops.ms_deform_attn_backward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, grad_output, im2col_step,
    )


__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch211-cxx11-cu130-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch211-cxx11-cu130-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..d47a6e507a41eb52f764e039d2fe396a874630c7
--- /dev/null
+++ b/build/torch211-cxx11-cu130-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fceb9d09c17ef824a862bc554d215eaddb4a7c7c01b7be5f8c9b3ab75c00deec
+size 9808456
diff --git a/build/torch211-cxx11-cu130-x86_64-linux/_ops.py b/build/torch211-cxx11-cu130-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch211-cxx11-cu130-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f
# Bind the compiled extension's op namespace once at import time.
ops = torch.ops._deformable_detr_cuda_52e302f


def add_op_namespace_prefix(op_name: str) -> str:
    """Return *op_name* qualified with this build's op namespace."""
    return "::".join(("_deformable_detr_cuda_52e302f", op_name))
diff --git a/build/torch211-cxx11-cu130-x86_64-linux/deformable_detr/__init__.py b/build/torch211-cxx11-cu130-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch211-cxx11-cu130-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch211-cxx11-cu130-x86_64-linux/layers.py b/build/torch211-cxx11-cu130-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch211-cxx11-cu130-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
from typing import List, Union, Tuple

from torch import Tensor
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import torch.nn as nn

from ._ops import ops


class MultiScaleDeformableAttentionFunction(Function):
    """Autograd bridge to the compiled multi-scale deformable attention op."""

    @staticmethod
    def forward(
        context,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        """Launch the fused forward kernel and stash what backward() needs."""
        context.im2col_step = im2col_step
        result = ops.ms_deform_attn_forward(
            value, value_spatial_shapes, value_level_start_index,
            sampling_locations, attention_weights, context.im2col_step,
        )
        context.save_for_backward(
            value, value_spatial_shapes, value_level_start_index,
            sampling_locations, attention_weights,
        )
        return result

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        """Launch the fused backward kernel on the saved forward inputs."""
        saved = context.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved, grad_output, context.im2col_step
        )
        # One return slot per forward() argument: the shape/index tensors and
        # the integer im2col_step receive no gradient.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None


class MultiScaleDeformableAttention(nn.Module):
    """nn.Module wrapper that dispatches to the custom autograd function."""

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # NOTE(review): value_spatial_shapes_list is accepted but unused here;
        # presumably kept so the signature matches callers that also pass the
        # Python-list form of the shapes — confirm against call sites.
        return MultiScaleDeformableAttentionFunction.apply(
            value, value_spatial_shapes, level_start_index,
            sampling_locations, attention_weights, im2col_step,
        )


__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch211-cxx11-cu130-x86_64-linux/metadata.json b/build/torch211-cxx11-cu130-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..66651b7d3f95ac9e5ce5fc2a641b6f0f50788f87
--- /dev/null
+++ b/build/torch211-cxx11-cu130-x86_64-linux/metadata.json
@@ -0,0 +1,19 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "11.0",
+ "12.0+PTX",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py b/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
from typing import List
import torch

from ._ops import ops
from . import layers


def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Run the compiled multi-scale deformable attention forward kernel.

    Thin pass-through to the registered ``ms_deform_attn_forward`` op; see
    the kernel for argument semantics.
    """
    return ops.ms_deform_attn_forward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, im2col_step,
    )


def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Run the compiled multi-scale deformable attention backward kernel.

    Thin pass-through to the registered ``ms_deform_attn_backward`` op.
    """
    return ops.ms_deform_attn_backward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, grad_output, im2col_step,
    )


__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..f622604e89058689647ba41b3c71ccbd3aa68ae7
--- /dev/null
+++ b/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae00c12295a458e2534149aea16da0289541447123c19fae59baaf6d6d2752f1
+size 6693656
diff --git a/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py b/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
# Bind the compiled extension's op namespace once at import time.
ops = torch.ops._deformable_detr_7c33cbe


def add_op_namespace_prefix(op_name: str) -> str:
    """Return *op_name* qualified with this build's op namespace."""
    return "::".join(("_deformable_detr_7c33cbe", op_name))
\ No newline at end of file
diff --git a/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py b/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch25-cxx11-cu118-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
from typing import List, Union, Tuple

from torch import Tensor
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import torch.nn as nn

from ._ops import ops


class MultiScaleDeformableAttentionFunction(Function):
    """Autograd bridge to the compiled multi-scale deformable attention op."""

    @staticmethod
    def forward(
        context,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        """Launch the fused forward kernel and stash what backward() needs."""
        context.im2col_step = im2col_step
        result = ops.ms_deform_attn_forward(
            value, value_spatial_shapes, value_level_start_index,
            sampling_locations, attention_weights, context.im2col_step,
        )
        context.save_for_backward(
            value, value_spatial_shapes, value_level_start_index,
            sampling_locations, attention_weights,
        )
        return result

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        """Launch the fused backward kernel on the saved forward inputs."""
        saved = context.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved, grad_output, context.im2col_step
        )
        # One return slot per forward() argument: the shape/index tensors and
        # the integer im2col_step receive no gradient.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None


class MultiScaleDeformableAttention(nn.Module):
    """nn.Module wrapper that dispatches to the custom autograd function."""

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # NOTE(review): value_spatial_shapes_list is accepted but unused here;
        # presumably kept so the signature matches callers that also pass the
        # Python-list form of the shapes — confirm against call sites.
        return MultiScaleDeformableAttentionFunction.apply(
            value, value_spatial_shapes, level_start_index,
            sampling_locations, attention_weights, im2col_step,
        )


__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/__init__.py b/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
from typing import List
import torch

from ._ops import ops
from . import layers


def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Run the compiled multi-scale deformable attention forward kernel.

    Thin pass-through to the registered ``ms_deform_attn_forward`` op; see
    the kernel for argument semantics.
    """
    return ops.ms_deform_attn_forward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, im2col_step,
    )


def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Run the compiled multi-scale deformable attention backward kernel.

    Thin pass-through to the registered ``ms_deform_attn_backward`` op.
    """
    return ops.ms_deform_attn_backward(
        value, spatial_shapes, level_start_index,
        sampling_loc, attn_weight, grad_output, im2col_step,
    )


__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..25768c52fa2f7041778baad9b1beaaee6772e8d3
--- /dev/null
+++ b/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ab8cf59779b768359df0fa268b6cd52be2f518dd4fafdd61baec31c64f44813
+size 6679440
diff --git a/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_ops.py b/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
def add_op_namespace_prefix(op_name: str):
    """Return *op_name* qualified with this extension's torch op namespace."""
    namespace = "_deformable_detr_7c33cbe"
    return f"{namespace}::{op_name}"
diff --git a/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/layers.py b/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch25-cxx11-cu121-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
class MultiScaleDeformableAttentionFunction(Function):
    """Autograd bridge for the fused multi-scale deformable attention op.

    Wraps the compiled ``ms_deform_attn_forward`` / ``ms_deform_attn_backward``
    extension kernels so that PyTorch autograd can differentiate through them.
    Gradients flow to ``value``, ``sampling_locations`` and
    ``attention_weights``; the spatial-shape/index tensors and ``im2col_step``
    are treated as non-differentiable.
    """

    @staticmethod
    def forward(
        context,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # Stash the (non-tensor) step size on the ctx so backward can
        # replay the same chunking when calling the backward kernel.
        context.im2col_step = im2col_step
        output = ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            context.im2col_step,
        )
        # Tensors needed to recompute gradients in backward().
        context.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return output

    @staticmethod
    @once_differentiable  # backward itself is not differentiable (no double backward)
    def backward(context, grad_output):
        (
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        ) = context.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            grad_output,
            context.im2col_step,
        )

        # One entry per forward() input (after ctx): None for the
        # non-differentiable shape/index tensors and im2col_step.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
class MultiScaleDeformableAttention(nn.Module):
    """Module wrapper exposing the fused deformable-attention kernel.

    Stateless: it simply dispatches to
    :class:`MultiScaleDeformableAttentionFunction` so the custom CUDA op
    participates in autograd.
    """

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        # NOTE(review): value_spatial_shapes_list is accepted but never used
        # here — presumably kept to match a caller's signature (e.g. the
        # HF transformers interface, which passes a Python-list copy of the
        # shapes); confirm before removing.
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        return MultiScaleDeformableAttentionFunction.apply(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/__init__.py b/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..a574c7a01d734f9f4fb7210058d3064c280c5564
--- /dev/null
+++ b/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5cdcd6902a03140074cff4cd44bf6b47dc27a32e13e0515a93929c66be186cab
+size 6652680
diff --git a/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py b/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_7c33cbe::{op_name}"
\ No newline at end of file
diff --git a/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/layers.py b/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch25-cxx11-cu124-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/__init__.py b/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..6360ed5a03fd9fbe757e8b7c1816f9935b13764f
--- /dev/null
+++ b/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82174ec2812ee672a447b94fb5ec907e348eb3d0be338daddf145a1d74969a6f
+size 6686592
diff --git a/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py b/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_7c33cbe::{op_name}"
\ No newline at end of file
diff --git a/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/layers.py b/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch25-cxx98-cu118-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/__init__.py b/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..d23c2f012773987542cbcab34a84e3eb240bdff9
--- /dev/null
+++ b/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9d8540a4ffa00d331f60204fe6baf543a45667d6bba2c0a0b23aca9202b6233
+size 6672464
diff --git a/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_ops.py b/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_7c33cbe::{op_name}"
\ No newline at end of file
diff --git a/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/layers.py b/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch25-cxx98-cu121-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/__init__.py b/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..aaa5548734ba34766edf646277e25b48752d6b7f
--- /dev/null
+++ b/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa748a4de72c06de09f46b4af4fec7f23cb2c76eb8683c117fefd20833cd3fd8
+size 6649800
diff --git a/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py b/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_7c33cbe::{op_name}"
\ No newline at end of file
diff --git a/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/layers.py b/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch25-cxx98-cu124-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py b/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..64ea4bcc2dfb73951fa3315247c594ea5b6aee1c
--- /dev/null
+++ b/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57e38bac3087c1446307e504b1e22e61ae584d1de7f5b3d15bd7a60780c3431c
+size 6693632
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py b/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_7c33cbe::{op_name}"
\ No newline at end of file
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/layers.py b/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch26-cxx11-cu118-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/__init__.py b/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..658ac25c98e67be8affb211b20fc2af46efcbca2
--- /dev/null
+++ b/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d7c09d3bedd89d7119e7023a07784724d3a3f79664b75fce37b778ef3bcfe52
+size 6648656
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py b/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_7c33cbe::{op_name}"
\ No newline at end of file
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/layers.py b/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch26-cxx11-cu124-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py b/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..d1b1fe211439d00ca4681305f56e91b4027fe45e
--- /dev/null
+++ b/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6856b6efe5130f019f6cb7f964d7a2073f1ecc5cd7afc850334e64798f871dae
+size 6833224
diff --git a/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py b/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
def add_op_namespace_prefix(op_name: str):
    """Return *op_name* qualified with this build's op namespace."""
    return "_deformable_detr_7c33cbe" + "::" + op_name
\ No newline at end of file
diff --git a/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/layers.py b/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch26-cxx11-cu126-aarch64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
class MultiScaleDeformableAttentionFunction(Function):
    """Autograd function dispatching to the native deformable-attention ops."""

    @staticmethod
    def forward(
        ctx,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # im2col_step is a plain int, so it rides on the context object
        # rather than going through save_for_backward.
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        saved = ctx.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved,
            grad_output,
            ctx.im2col_step,
        )
        # Index tensors and im2col_step are non-differentiable: return None
        # in their positions of the 6-argument forward() signature.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
class MultiScaleDeformableAttention(nn.Module):
    """Module wrapper around :class:`MultiScaleDeformableAttentionFunction`.

    ``value_spatial_shapes_list`` is accepted only for signature
    compatibility; the kernel path does not use it.
    """

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        apply_fn = MultiScaleDeformableAttentionFunction.apply
        return apply_fn(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py b/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Backward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_backward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        grad_output,
        im2col_step,
    )
    return ops.ms_deform_attn_backward(*args)
+
+
def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Forward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_forward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        im2col_step,
    )
    return ops.ms_deform_attn_forward(*args)
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..10bcc5c4bad459abe354c507166121a5a47d5d74
--- /dev/null
+++ b/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c76ad874b78882d3108a7fdaf49f8c00b6a6a7dceec63912118f8fa7d07e5f30
+size 6800656
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_ops.py b/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
def add_op_namespace_prefix(op_name: str):
    """Return *op_name* qualified with this build's op namespace."""
    return "_deformable_detr_7c33cbe" + "::" + op_name
\ No newline at end of file
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/layers.py b/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch26-cxx11-cu126-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
class MultiScaleDeformableAttentionFunction(Function):
    """Autograd function dispatching to the native deformable-attention ops."""

    @staticmethod
    def forward(
        ctx,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # im2col_step is a plain int, so it rides on the context object
        # rather than going through save_for_backward.
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        saved = ctx.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved,
            grad_output,
            ctx.im2col_step,
        )
        # Index tensors and im2col_step are non-differentiable: return None
        # in their positions of the 6-argument forward() signature.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
class MultiScaleDeformableAttention(nn.Module):
    """Module wrapper around :class:`MultiScaleDeformableAttentionFunction`.

    ``value_spatial_shapes_list`` is accepted only for signature
    compatibility; the kernel path does not use it.
    """

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        apply_fn = MultiScaleDeformableAttentionFunction.apply
        return apply_fn(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/__init__.py b/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Backward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_backward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        grad_output,
        im2col_step,
    )
    return ops.ms_deform_attn_backward(*args)
+
+
def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Forward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_forward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        im2col_step,
    )
    return ops.ms_deform_attn_forward(*args)
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..a15ec9d8735e3de07ebd3bfaae1b2ded8de87447
--- /dev/null
+++ b/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1fb3a24fd95c1cc3cba080ae1c9d4217f377435770c7e423de53b11ecc437dc
+size 6686600
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py b/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
def add_op_namespace_prefix(op_name: str):
    """Return *op_name* qualified with this build's op namespace."""
    return "_deformable_detr_7c33cbe" + "::" + op_name
\ No newline at end of file
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/layers.py b/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch26-cxx98-cu118-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
class MultiScaleDeformableAttentionFunction(Function):
    """Autograd function dispatching to the native deformable-attention ops."""

    @staticmethod
    def forward(
        ctx,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # im2col_step is a plain int, so it rides on the context object
        # rather than going through save_for_backward.
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        saved = ctx.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved,
            grad_output,
            ctx.im2col_step,
        )
        # Index tensors and im2col_step are non-differentiable: return None
        # in their positions of the 6-argument forward() signature.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
class MultiScaleDeformableAttention(nn.Module):
    """Module wrapper around :class:`MultiScaleDeformableAttentionFunction`.

    ``value_spatial_shapes_list`` is accepted only for signature
    compatibility; the kernel path does not use it.
    """

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        apply_fn = MultiScaleDeformableAttentionFunction.apply
        return apply_fn(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/__init__.py b/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Backward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_backward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        grad_output,
        im2col_step,
    )
    return ops.ms_deform_attn_backward(*args)
+
+
def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Forward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_forward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        im2col_step,
    )
    return ops.ms_deform_attn_forward(*args)
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..63c34c123c4b78349bc1a2bb92b0614c4b689a13
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5a455975be5790964cc95c6813d293b1aba581f5c2dc132c9a08690bf6e5cad
+size 6649808
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py b/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
def add_op_namespace_prefix(op_name: str):
    """Return *op_name* qualified with this build's op namespace."""
    return "_deformable_detr_7c33cbe" + "::" + op_name
\ No newline at end of file
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/layers.py b/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
class MultiScaleDeformableAttentionFunction(Function):
    """Autograd function dispatching to the native deformable-attention ops."""

    @staticmethod
    def forward(
        ctx,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # im2col_step is a plain int, so it rides on the context object
        # rather than going through save_for_backward.
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        saved = ctx.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved,
            grad_output,
            ctx.im2col_step,
        )
        # Index tensors and im2col_step are non-differentiable: return None
        # in their positions of the 6-argument forward() signature.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
class MultiScaleDeformableAttention(nn.Module):
    """Module wrapper around :class:`MultiScaleDeformableAttentionFunction`.

    ``value_spatial_shapes_list`` is accepted only for signature
    compatibility; the kernel path does not use it.
    """

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        apply_fn = MultiScaleDeformableAttentionFunction.apply
        return apply_fn(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/__init__.py b/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Backward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_backward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        grad_output,
        im2col_step,
    )
    return ops.ms_deform_attn_backward(*args)
+
+
def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Forward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_forward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        im2col_step,
    )
    return ops.ms_deform_attn_forward(*args)
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..6740aef68abf0d71d0756d6b2f78fc2da67c6752
--- /dev/null
+++ b/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c13dce2b080676eb192d87ba83df6ef1f6d0f1101727f4b29185d48dec7281d
+size 6829872
diff --git a/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/_ops.py b/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
def add_op_namespace_prefix(op_name: str):
    """Return *op_name* qualified with this build's op namespace."""
    return "_deformable_detr_7c33cbe" + "::" + op_name
\ No newline at end of file
diff --git a/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/layers.py b/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch26-cxx98-cu126-aarch64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
class MultiScaleDeformableAttentionFunction(Function):
    """Autograd function dispatching to the native deformable-attention ops."""

    @staticmethod
    def forward(
        ctx,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # im2col_step is a plain int, so it rides on the context object
        # rather than going through save_for_backward.
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        saved = ctx.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved,
            grad_output,
            ctx.im2col_step,
        )
        # Index tensors and im2col_step are non-differentiable: return None
        # in their positions of the 6-argument forward() signature.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
class MultiScaleDeformableAttention(nn.Module):
    """Module wrapper around :class:`MultiScaleDeformableAttentionFunction`.

    ``value_spatial_shapes_list`` is accepted only for signature
    compatibility; the kernel path does not use it.
    """

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        apply_fn = MultiScaleDeformableAttentionFunction.apply
        return apply_fn(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/__init__.py b/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Backward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_backward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        grad_output,
        im2col_step,
    )
    return ops.ms_deform_attn_backward(*args)
+
+
def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Forward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_forward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        im2col_step,
    )
    return ops.ms_deform_attn_forward(*args)
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..f3ce58cab856be1f382aa9f3cf1bb0b1ec06d41d
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6ad344319579f0abef7fe1a9d3f479f1c8737994f563a540815a1445020959e
+size 6797712
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_ops.py b/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
def add_op_namespace_prefix(op_name: str):
    """Return *op_name* qualified with this build's op namespace."""
    return "_deformable_detr_7c33cbe" + "::" + op_name
\ No newline at end of file
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/layers.py b/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
class MultiScaleDeformableAttentionFunction(Function):
    """Autograd function dispatching to the native deformable-attention ops."""

    @staticmethod
    def forward(
        ctx,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # im2col_step is a plain int, so it rides on the context object
        # rather than going through save_for_backward.
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        saved = ctx.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved,
            grad_output,
            ctx.im2col_step,
        )
        # Index tensors and im2col_step are non-differentiable: return None
        # in their positions of the 6-argument forward() signature.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
class MultiScaleDeformableAttention(nn.Module):
    """Module wrapper around :class:`MultiScaleDeformableAttentionFunction`.

    ``value_spatial_shapes_list`` is accepted only for signature
    compatibility; the kernel path does not use it.
    """

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        apply_fn = MultiScaleDeformableAttentionFunction.apply
        return apply_fn(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Backward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_backward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        grad_output,
        im2col_step,
    )
    return ops.ms_deform_attn_backward(*args)
+
+
def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Forward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_forward`` op;
    all arguments are forwarded unchanged.
    """
    args = (
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        im2col_step,
    )
    return ops.ms_deform_attn_forward(*args)
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a18bf078a3920470e28c50b9bc3b7efffad37d72
Binary files /dev/null and b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a6f280954dd80fbb6d49390c446f50f2924d3fd
Binary files /dev/null and b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..097f24d7bc07674acd326d3a99991123d90a5838
Binary files /dev/null and b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_57c3d32.abi3.so b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_57c3d32.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..f7dc97abb0f55df13905e5275d76fa859beb7a30
--- /dev/null
+++ b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/_deformable_detr_57c3d32.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9293f97cc6b06bc3ba5e57cfd084abb252c287f4518935208e67e126e7cbd19b
+size 6800224
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..39c2aa7875432779e86612a0e56271fe32133953
--- /dev/null
+++ b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_57c3d32
+ops = torch.ops._deformable_detr_57c3d32
+
def add_op_namespace_prefix(op_name: str):
    """Return *op_name* qualified with this build's op namespace."""
    return "_deformable_detr_57c3d32" + "::" + op_name
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/layers.py b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch27-cxx11-cu118-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
class MultiScaleDeformableAttentionFunction(Function):
    """Autograd function dispatching to the native deformable-attention ops."""

    @staticmethod
    def forward(
        ctx,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # im2col_step is a plain int, so it rides on the context object
        # rather than going through save_for_backward.
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        saved = ctx.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved,
            grad_output,
            ctx.im2col_step,
        )
        # Index tensors and im2col_step are non-differentiable: return None
        # in their positions of the 6-argument forward() signature.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
class MultiScaleDeformableAttention(nn.Module):
    """Module wrapper around :class:`MultiScaleDeformableAttentionFunction`.

    ``value_spatial_shapes_list`` is accepted only for signature
    compatibility; the kernel path does not use it.
    """

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        apply_fn = MultiScaleDeformableAttentionFunction.apply
        return apply_fn(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py b/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so b/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..63b666838685ae29d11438e79273b09b38f9df39
--- /dev/null
+++ b/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_7c33cbe.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af2831b68229a910e8703cae2c9e720ded825e401745d38923548c444e56c37b
+size 6833456
diff --git a/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py b/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..676ee6fea64b714dedb7ccd1d54148dcf75575a6
--- /dev/null
+++ b/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_7c33cbe
+ops = torch.ops._deformable_detr_7c33cbe
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_7c33cbe::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/layers.py b/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch27-cxx11-cu126-aarch64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..03f23b4fd38706ba1888acf199b905f851d2b024
Binary files /dev/null and b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cca62822318d51b9dbb51466f3e0a6d5fd388306
Binary files /dev/null and b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7dfb8dda54ffc9062a129331374926b1fe104ecd
Binary files /dev/null and b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_57c3d32.abi3.so b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_57c3d32.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..abe151e58a46c02191f11213027731ed26b7a182
--- /dev/null
+++ b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/_deformable_detr_57c3d32.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df54f46f59b5b78b15314cb0825d8f1f34c7a3198e9d62ca2a65a8ca72ea79a4
+size 6911280
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/_ops.py b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..39c2aa7875432779e86612a0e56271fe32133953
--- /dev/null
+++ b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_57c3d32
+ops = torch.ops._deformable_detr_57c3d32
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_57c3d32::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/layers.py b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch27-cxx11-cu126-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2c7fa735b9a5720691a5ed6c31e566b5bc81dd77
Binary files /dev/null and b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..499ecc259157bcef7916fea79bdd097407854ba3
Binary files /dev/null and b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5483ccd7f0aaaeffd23797fbec5d47f3e8f49080
Binary files /dev/null and b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..0c564ee29cec2888bffa2ce31732d90efb502625
--- /dev/null
+++ b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8926ca42814a03cdbac750f3a0cd3e3cbc28614a58e1ca5a77e82b3ad0148043
+size 9979264
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/_ops.py b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..155c52bfca54e55425b639314b289e668c5a6ec2
--- /dev/null
+++ b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_320b408
+ops = torch.ops._deformable_detr_320b408
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_320b408::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/layers.py b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..381ef42a3282d0c2787e94f3c29d999e4736d7c8
Binary files /dev/null and b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9291f79801323c5a3efc995f9c90ffa6cc35a50c
Binary files /dev/null and b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94595475894bf15ef880e78d37ec23bca83bed08
Binary files /dev/null and b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/_deformable_detr_57c3d32.abi3.so b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/_deformable_detr_57c3d32.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..2f388366cf92d9a815c26db28ca4a855f8e713ba
--- /dev/null
+++ b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/_deformable_detr_57c3d32.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f389536870cd4acf36ab8f12d3b0bf9f847ec06e2cfc25905420796884b614e
+size 9907368
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/_ops.py b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..39c2aa7875432779e86612a0e56271fe32133953
--- /dev/null
+++ b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_57c3d32
+ops = torch.ops._deformable_detr_57c3d32
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_57c3d32::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/layers.py b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch27-cxx11-cu128-x86_64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..664a914e405aa872821d228b552f22a99cb39d97
Binary files /dev/null and b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..273cdfd52347ebcf0e0c050945d37c5dc5094a9a
Binary files /dev/null and b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..082d05eb36d7ef831ca076e3e7d2eb070130c2ec
Binary files /dev/null and b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_a92c8ea_dirty.abi3.so b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_a92c8ea_dirty.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..3d206228684c315eb04160dc62c8a7cb6811156a
--- /dev/null
+++ b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_a92c8ea_dirty.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4a5665b05309312200ca97a80cc61340c0f5de123ab33254e5307a5ec4ed2a0
+size 6901024
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..019cf7ce30dc11d9b791075404417b1ac47500e7
--- /dev/null
+++ b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_a92c8ea_dirty
+ops = torch.ops._deformable_detr_a92c8ea_dirty
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_a92c8ea_dirty::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/layers.py b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch28-cxx11-cu126-aarch64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ context.im2col_step = im2col_step
+ output = ops.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+ def forward(
+ self,
+ value: Tensor,
+ value_spatial_shapes: Tensor,
+ value_spatial_shapes_list: List[Tuple],
+ level_start_index: Tensor,
+ sampling_locations: Tensor,
+ attention_weights: Tensor,
+ im2col_step: int,
+ ):
+ return MultiScaleDeformableAttentionFunction.apply(
+ value,
+ value_spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/__init__.py b/build/torch28-cxx11-cu126-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch28-cxx11-cu126-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ grad_output: torch.Tensor,
+ im2col_step: int,
+) -> List[torch.Tensor]:
+ return ops.ms_deform_attn_backward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ grad_output,
+ im2col_step,
+ )
+
+
+def ms_deform_attn_forward(
+ value: torch.Tensor,
+ spatial_shapes: torch.Tensor,
+ level_start_index: torch.Tensor,
+ sampling_loc: torch.Tensor,
+ attn_weight: torch.Tensor,
+ im2col_step: int,
+) -> torch.Tensor:
+ return ops.ms_deform_attn_forward(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_loc,
+ attn_weight,
+ im2col_step,
+ )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/_deformable_detr_d7966ee.abi3.so b/build/torch28-cxx11-cu126-x86_64-linux/_deformable_detr_d7966ee.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..818f37b941ec19250adbb6feb357a0ec81c67b2a
--- /dev/null
+++ b/build/torch28-cxx11-cu126-x86_64-linux/_deformable_detr_d7966ee.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8e98e8cdd688603d90d9bbc9fc7fd093d7c8f098ae239b33d59db563ca20d3f
+size 8535712
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/_ops.py b/build/torch28-cxx11-cu126-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..b498ff9a9fd0bc22a42440001932cf97a8a9e955
--- /dev/null
+++ b/build/torch28-cxx11-cu126-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_d7966ee
+ops = torch.ops._deformable_detr_d7966ee
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_deformable_detr_d7966ee::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py b/build/torch28-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309
--- /dev/null
+++ b/build/torch28-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/layers.py b/build/torch28-cxx11-cu126-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch28-cxx11-cu126-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+    """Autograd Function dispatching to the compiled multi-scale deformable
+    attention kernels exposed through the ``ops`` namespace."""
+
+    @staticmethod
+    def forward(
+        context,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # im2col_step is a plain int; stash it on the context because
+        # save_for_backward() only accepts tensors.
+        context.im2col_step = im2col_step
+        output = ops.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            context.im2col_step,
+        )
+        # Save every tensor input needed to recompute gradients in backward().
+        context.save_for_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        )
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(context, grad_output):
+        (
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        ) = context.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output,
+            context.im2col_step,
+        )
+
+        # One slot per forward() input; the spatial-shape/index tensors and
+        # the int im2col_step are non-differentiable, hence the None entries.
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+    """Thin nn.Module wrapper so the fused attention op can be used as a layer."""
+
+    def forward(
+        self,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_spatial_shapes_list: List[Tuple],
+        level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # NOTE(review): value_spatial_shapes_list is accepted but never used —
+        # presumably kept for caller signature compatibility; confirm upstream.
+        return MultiScaleDeformableAttentionFunction.apply(
+            value,
+            value_spatial_shapes,
+            level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step,
+        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/metadata.json b/build/torch28-cxx11-cu126-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..9cf5deed9898dce769f4cc73913d3530b92a0bd8
--- /dev/null
+++ b/build/torch28-cxx11-cu126-x86_64-linux/metadata.json
@@ -0,0 +1,4 @@
+{
+ "version": 1,
+ "python-depends": []
+}
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    grad_output: torch.Tensor,
+    im2col_step: int,
+) -> List[torch.Tensor]:
+    """Backward pass of multi-scale deformable attention.
+
+    Thin passthrough to the compiled op; presumably returns gradients w.r.t.
+    value, sampling locations and attention weights (matching the three-way
+    unpack in ``layers``) — confirm against the kernel source.
+    """
+    return ops.ms_deform_attn_backward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        grad_output,
+        im2col_step,
+    )
+
+
+def ms_deform_attn_forward(
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    im2col_step: int,
+) -> torch.Tensor:
+    """Forward pass of multi-scale deformable attention (passthrough to the
+    compiled op)."""
+    return ops.ms_deform_attn_forward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        im2col_step,
+    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28a5933156ae99fa286e692f8e98dd04e50f5174
Binary files /dev/null and b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0009fad4a606c6aacd228a2d7af5e6263d47d861
Binary files /dev/null and b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18befc2ceb219257a717fe3c9800618101a3904c
Binary files /dev/null and b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/_deformable_detr_a92c8ea_dirty.abi3.so b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/_deformable_detr_a92c8ea_dirty.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..7a0b563d8285a538c60f993ba90ffee3d5b49ec6
--- /dev/null
+++ b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/_deformable_detr_a92c8ea_dirty.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f4a0a7850968822e26e3a59c801fd711231d5294193155efbf9583761e114ef
+size 9849688
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/_ops.py b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..019cf7ce30dc11d9b791075404417b1ac47500e7
--- /dev/null
+++ b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+# Importing the compiled extension registers its custom ops with torch.ops.
+from . import _deformable_detr_a92c8ea_dirty
+# Handle to this build's op namespace (suffixed with the build hash so that
+# multiple builds can coexist in one process).
+ops = torch.ops._deformable_detr_a92c8ea_dirty
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+
+    Returns the fully qualified name ("_deformable_detr_a92c8ea_dirty::<op_name>")
+    used when looking up the op in the torch op registry.
+    """
+    return f"_deformable_detr_a92c8ea_dirty::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/layers.py b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch28-cxx11-cu128-aarch64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+    """Autograd Function dispatching to the compiled multi-scale deformable
+    attention kernels exposed through the ``ops`` namespace."""
+
+    @staticmethod
+    def forward(
+        context,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # im2col_step is a plain int; stash it on the context because
+        # save_for_backward() only accepts tensors.
+        context.im2col_step = im2col_step
+        output = ops.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            context.im2col_step,
+        )
+        # Save every tensor input needed to recompute gradients in backward().
+        context.save_for_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        )
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(context, grad_output):
+        (
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        ) = context.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output,
+            context.im2col_step,
+        )
+
+        # One slot per forward() input; the spatial-shape/index tensors and
+        # the int im2col_step are non-differentiable, hence the None entries.
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+    """Thin nn.Module wrapper so the fused attention op can be used as a layer."""
+
+    def forward(
+        self,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_spatial_shapes_list: List[Tuple],
+        level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # NOTE(review): value_spatial_shapes_list is accepted but never used —
+        # presumably kept for caller signature compatibility; confirm upstream.
+        return MultiScaleDeformableAttentionFunction.apply(
+            value,
+            value_spatial_shapes,
+            level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step,
+        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/__init__.py b/build/torch28-cxx11-cu128-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch28-cxx11-cu128-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    grad_output: torch.Tensor,
+    im2col_step: int,
+) -> List[torch.Tensor]:
+    """Backward pass of multi-scale deformable attention.
+
+    Thin passthrough to the compiled op; presumably returns gradients w.r.t.
+    value, sampling locations and attention weights (matching the three-way
+    unpack in ``layers``) — confirm against the kernel source.
+    """
+    return ops.ms_deform_attn_backward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        grad_output,
+        im2col_step,
+    )
+
+
+def ms_deform_attn_forward(
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    im2col_step: int,
+) -> torch.Tensor:
+    """Forward pass of multi-scale deformable attention (passthrough to the
+    compiled op)."""
+    return ops.ms_deform_attn_forward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        im2col_step,
+    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/_deformable_detr_d7966ee.abi3.so b/build/torch28-cxx11-cu128-x86_64-linux/_deformable_detr_d7966ee.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..2118be1b65bcb2c028582b19fa63835513865bf1
--- /dev/null
+++ b/build/torch28-cxx11-cu128-x86_64-linux/_deformable_detr_d7966ee.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:615c916fe00481be53757d381f62c663c3519bf5d0dda09514b13bf9e493b807
+size 11523184
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/_ops.py b/build/torch28-cxx11-cu128-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..b498ff9a9fd0bc22a42440001932cf97a8a9e955
--- /dev/null
+++ b/build/torch28-cxx11-cu128-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+# Importing the compiled extension registers its custom ops with torch.ops.
+from . import _deformable_detr_d7966ee
+# Handle to this build's op namespace (suffixed with the build hash so that
+# multiple builds can coexist in one process).
+ops = torch.ops._deformable_detr_d7966ee
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+
+    Returns the fully qualified name ("_deformable_detr_d7966ee::<op_name>")
+    used when looking up the op in the torch op registry.
+    """
+    return f"_deformable_detr_d7966ee::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py b/build/torch28-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309
--- /dev/null
+++ b/build/torch28-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    """Load and execute the module at ``file_path``, registering it in
+    ``sys.modules`` under a unique, path-derived name.
+    """
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    # NOTE(review): relies on `importlib.util` being reachable after a bare
+    # `import importlib`; an explicit `import importlib.util` would be safer.
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module) # type: ignore
+    return module
+
+
+# Re-export the sibling top-level build package's symbols so importing this
+# nested `deformable_detr` package behaves like importing the build root.
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/layers.py b/build/torch28-cxx11-cu128-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch28-cxx11-cu128-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+    """Autograd Function dispatching to the compiled multi-scale deformable
+    attention kernels exposed through the ``ops`` namespace."""
+
+    @staticmethod
+    def forward(
+        context,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # im2col_step is a plain int; stash it on the context because
+        # save_for_backward() only accepts tensors.
+        context.im2col_step = im2col_step
+        output = ops.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            context.im2col_step,
+        )
+        # Save every tensor input needed to recompute gradients in backward().
+        context.save_for_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        )
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(context, grad_output):
+        (
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        ) = context.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output,
+            context.im2col_step,
+        )
+
+        # One slot per forward() input; the spatial-shape/index tensors and
+        # the int im2col_step are non-differentiable, hence the None entries.
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+    """Thin nn.Module wrapper so the fused attention op can be used as a layer."""
+
+    def forward(
+        self,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_spatial_shapes_list: List[Tuple],
+        level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # NOTE(review): value_spatial_shapes_list is accepted but never used —
+        # presumably kept for caller signature compatibility; confirm upstream.
+        return MultiScaleDeformableAttentionFunction.apply(
+            value,
+            value_spatial_shapes,
+            level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step,
+        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/metadata.json b/build/torch28-cxx11-cu128-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..9cf5deed9898dce769f4cc73913d3530b92a0bd8
--- /dev/null
+++ b/build/torch28-cxx11-cu128-x86_64-linux/metadata.json
@@ -0,0 +1,4 @@
+{
+ "version": 1,
+ "python-depends": []
+}
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__init__.py b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    grad_output: torch.Tensor,
+    im2col_step: int,
+) -> List[torch.Tensor]:
+    """Backward pass of multi-scale deformable attention.
+
+    Thin passthrough to the compiled op; presumably returns gradients w.r.t.
+    value, sampling locations and attention weights (matching the three-way
+    unpack in ``layers``) — confirm against the kernel source.
+    """
+    return ops.ms_deform_attn_backward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        grad_output,
+        im2col_step,
+    )
+
+
+def ms_deform_attn_forward(
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    im2col_step: int,
+) -> torch.Tensor:
+    """Forward pass of multi-scale deformable attention (passthrough to the
+    compiled op)."""
+    return ops.ms_deform_attn_forward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        im2col_step,
+    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3455ebe146d88ed909d4d3a22cbafdb7062e1a5c
Binary files /dev/null and b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d0da2a8b90739e30e9e2787462994764ddd0e6b
Binary files /dev/null and b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f35af2756ff4a30e7bd8d95092215bc26766097d
Binary files /dev/null and b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..fe1c2e4e6b379f20da5c38cee026aac074b71fb4
--- /dev/null
+++ b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7f99924b14f0522d25c5f0a307ab1364a3f76d9ecf29684f80aa388f6bd443b
+size 10047704
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/_ops.py b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..155c52bfca54e55425b639314b289e668c5a6ec2
--- /dev/null
+++ b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/_ops.py
@@ -0,0 +1,9 @@
+import torch
+# Importing the compiled extension registers its custom ops with torch.ops.
+from . import _deformable_detr_320b408
+# Handle to this build's op namespace (suffixed with the build hash so that
+# multiple builds can coexist in one process).
+ops = torch.ops._deformable_detr_320b408
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+
+    Returns the fully qualified name ("_deformable_detr_320b408::<op_name>")
+    used when looking up the op in the torch op registry.
+    """
+    return f"_deformable_detr_320b408::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/layers.py b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+    """Autograd Function dispatching to the compiled multi-scale deformable
+    attention kernels exposed through the ``ops`` namespace."""
+
+    @staticmethod
+    def forward(
+        context,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # im2col_step is a plain int; stash it on the context because
+        # save_for_backward() only accepts tensors.
+        context.im2col_step = im2col_step
+        output = ops.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            context.im2col_step,
+        )
+        # Save every tensor input needed to recompute gradients in backward().
+        context.save_for_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        )
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(context, grad_output):
+        (
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        ) = context.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output,
+            context.im2col_step,
+        )
+
+        # One slot per forward() input; the spatial-shape/index tensors and
+        # the int im2col_step are non-differentiable, hence the None entries.
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+    """Thin nn.Module wrapper so the fused attention op can be used as a layer."""
+
+    def forward(
+        self,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_spatial_shapes_list: List[Tuple],
+        level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # NOTE(review): value_spatial_shapes_list is accepted but never used —
+        # presumably kept for caller signature compatibility; confirm upstream.
+        return MultiScaleDeformableAttentionFunction.apply(
+            value,
+            value_spatial_shapes,
+            level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step,
+        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/__init__.py b/build/torch28-cxx11-cu129-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch28-cxx11-cu129-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    grad_output: torch.Tensor,
+    im2col_step: int,
+) -> List[torch.Tensor]:
+    """Backward pass of multi-scale deformable attention.
+
+    Thin passthrough to the compiled op; presumably returns gradients w.r.t.
+    value, sampling locations and attention weights (matching the three-way
+    unpack in ``layers``) — confirm against the kernel source.
+    """
+    return ops.ms_deform_attn_backward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        grad_output,
+        im2col_step,
+    )
+
+
+def ms_deform_attn_forward(
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    im2col_step: int,
+) -> torch.Tensor:
+    """Forward pass of multi-scale deformable attention (passthrough to the
+    compiled op)."""
+    return ops.ms_deform_attn_forward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        im2col_step,
+    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/_deformable_detr_d7966ee.abi3.so b/build/torch28-cxx11-cu129-x86_64-linux/_deformable_detr_d7966ee.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..be60b895fec86b2ac0f8f4387d10002f63cf4c8f
--- /dev/null
+++ b/build/torch28-cxx11-cu129-x86_64-linux/_deformable_detr_d7966ee.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b38ea9a577233aee07a6417d5651076d27940a1a5b9b7edb89d1745a885b071
+size 11581544
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/_ops.py b/build/torch28-cxx11-cu129-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..b498ff9a9fd0bc22a42440001932cf97a8a9e955
--- /dev/null
+++ b/build/torch28-cxx11-cu129-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+# Importing the compiled extension registers its custom ops with torch.ops.
+from . import _deformable_detr_d7966ee
+# Handle to this build's op namespace (suffixed with the build hash so that
+# multiple builds can coexist in one process).
+ops = torch.ops._deformable_detr_d7966ee
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+
+    Returns the fully qualified name ("_deformable_detr_d7966ee::<op_name>")
+    used when looking up the op in the torch op registry.
+    """
+    return f"_deformable_detr_d7966ee::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/deformable_detr/__init__.py b/build/torch28-cxx11-cu129-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309
--- /dev/null
+++ b/build/torch28-cxx11-cu129-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    """Load and execute the module at ``file_path``, registering it in
+    ``sys.modules`` under a unique, path-derived name.
+    """
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    # NOTE(review): relies on `importlib.util` being reachable after a bare
+    # `import importlib`; an explicit `import importlib.util` would be safer.
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module) # type: ignore
+    return module
+
+
+# Re-export the sibling top-level build package's symbols so importing this
+# nested `deformable_detr` package behaves like importing the build root.
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/layers.py b/build/torch28-cxx11-cu129-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch28-cxx11-cu129-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+    """Autograd Function dispatching to the compiled multi-scale deformable
+    attention kernels exposed through the ``ops`` namespace."""
+
+    @staticmethod
+    def forward(
+        context,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # im2col_step is a plain int; stash it on the context because
+        # save_for_backward() only accepts tensors.
+        context.im2col_step = im2col_step
+        output = ops.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            context.im2col_step,
+        )
+        # Save every tensor input needed to recompute gradients in backward().
+        context.save_for_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        )
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(context, grad_output):
+        (
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        ) = context.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output,
+            context.im2col_step,
+        )
+
+        # One slot per forward() input; the spatial-shape/index tensors and
+        # the int im2col_step are non-differentiable, hence the None entries.
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+    """Thin nn.Module wrapper so the fused attention op can be used as a layer."""
+
+    def forward(
+        self,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_spatial_shapes_list: List[Tuple],
+        level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # NOTE(review): value_spatial_shapes_list is accepted but never used —
+        # presumably kept for caller signature compatibility; confirm upstream.
+        return MultiScaleDeformableAttentionFunction.apply(
+            value,
+            value_spatial_shapes,
+            level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step,
+        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/metadata.json b/build/torch28-cxx11-cu129-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..9cf5deed9898dce769f4cc73913d3530b92a0bd8
--- /dev/null
+++ b/build/torch28-cxx11-cu129-x86_64-linux/metadata.json
@@ -0,0 +1,4 @@
+{
+ "version": 1,
+ "python-depends": []
+}
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/__init__.py b/build/torch29-cxx11-cu126-aarch64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch29-cxx11-cu126-aarch64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    grad_output: torch.Tensor,
+    im2col_step: int,
+) -> List[torch.Tensor]:
+    """Backward pass of multi-scale deformable attention.
+
+    Thin passthrough to the compiled op; presumably returns gradients w.r.t.
+    value, sampling locations and attention weights (matching the three-way
+    unpack in ``layers``) — confirm against the kernel source.
+    """
+    return ops.ms_deform_attn_backward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        grad_output,
+        im2col_step,
+    )
+
+
+def ms_deform_attn_forward(
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    im2col_step: int,
+) -> torch.Tensor:
+    """Forward pass of multi-scale deformable attention (passthrough to the
+    compiled op)."""
+    return ops.ms_deform_attn_forward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        im2col_step,
+    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/_deformable_detr_cuda_a06632f.abi3.so b/build/torch29-cxx11-cu126-aarch64-linux/_deformable_detr_cuda_a06632f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..097315236d6852aa1cf13eef398aafa6258dac7e
--- /dev/null
+++ b/build/torch29-cxx11-cu126-aarch64-linux/_deformable_detr_cuda_a06632f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e699c1c6b8c41b718c233089e16c5683fa782dd1b87e61e954941ab5f47d8341
+size 8604944
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/_ops.py b/build/torch29-cxx11-cu126-aarch64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..b150fa4c35414012586d1ba33daac6779a49d93a
--- /dev/null
+++ b/build/torch29-cxx11-cu126-aarch64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+# Importing the compiled extension registers its custom ops with torch.ops.
+from . import _deformable_detr_cuda_a06632f
+# Handle to this build's op namespace (suffixed with the build hash so that
+# multiple builds can coexist in one process).
+ops = torch.ops._deformable_detr_cuda_a06632f
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+
+    Returns the fully qualified name ("_deformable_detr_cuda_a06632f::<op_name>")
+    used when looking up the op in the torch op registry.
+    """
+    return f"_deformable_detr_cuda_a06632f::{op_name}"
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py b/build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309
--- /dev/null
+++ b/build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    """Load and execute the module at ``file_path``, registering it in
+    ``sys.modules`` under a unique, path-derived name.
+    """
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    # NOTE(review): relies on `importlib.util` being reachable after a bare
+    # `import importlib`; an explicit `import importlib.util` would be safer.
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module) # type: ignore
+    return module
+
+
+# Re-export the sibling top-level build package's symbols so importing this
+# nested `deformable_detr` package behaves like importing the build root.
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/layers.py b/build/torch29-cxx11-cu126-aarch64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch29-cxx11-cu126-aarch64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+    """Autograd Function dispatching to the compiled multi-scale deformable
+    attention kernels exposed through the ``ops`` namespace."""
+
+    @staticmethod
+    def forward(
+        context,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # im2col_step is a plain int; stash it on the context because
+        # save_for_backward() only accepts tensors.
+        context.im2col_step = im2col_step
+        output = ops.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            context.im2col_step,
+        )
+        # Save every tensor input needed to recompute gradients in backward().
+        context.save_for_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        )
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(context, grad_output):
+        (
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        ) = context.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output,
+            context.im2col_step,
+        )
+
+        # One slot per forward() input; the spatial-shape/index tensors and
+        # the int im2col_step are non-differentiable, hence the None entries.
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+class MultiScaleDeformableAttention(nn.Module):
+    """Thin nn.Module wrapper so the fused attention op can be used as a layer."""
+
+    def forward(
+        self,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_spatial_shapes_list: List[Tuple],
+        level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        # NOTE(review): value_spatial_shapes_list is accepted but never used —
+        # presumably kept for caller signature compatibility; confirm upstream.
+        return MultiScaleDeformableAttentionFunction.apply(
+            value,
+            value_spatial_shapes,
+            level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step,
+        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/metadata.json b/build/torch29-cxx11-cu126-aarch64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..f5902b55ab0b2b561c0cf97567c9806c60839c7f
--- /dev/null
+++ b/build/torch29-cxx11-cu126-aarch64-linux/metadata.json
@@ -0,0 +1,18 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0+PTX"
+ ]
+ }
+}
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/__init__.py b/build/torch29-cxx11-cu126-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch29-cxx11-cu126-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(  # typed Python wrapper over the registered CUDA backward op
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    grad_output: torch.Tensor,
+    im2col_step: int,
+) -> List[torch.Tensor]:  # presumably [grad_value, grad_sampling_loc, grad_attn_weight]; matches the unpacking in layers.py -- confirm
+    return ops.ms_deform_attn_backward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        grad_output,
+        im2col_step,
+    )
+
+
+def ms_deform_attn_forward(  # typed Python wrapper over the registered CUDA forward op
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    im2col_step: int,  # batch chunk size used by the im2col-style kernel
+) -> torch.Tensor:
+    return ops.ms_deform_attn_forward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        im2col_step,
+    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]  # public API of the kernel package
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/_deformable_detr_cuda_a06632f.abi3.so b/build/torch29-cxx11-cu126-x86_64-linux/_deformable_detr_cuda_a06632f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..a404f522dd98443c7f9f6d2fb9df13ca99433b0c
--- /dev/null
+++ b/build/torch29-cxx11-cu126-x86_64-linux/_deformable_detr_cuda_a06632f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3165afbd76a4a3cdb1d23f014e551cb7d4a43286bbb4d356148f4efc232f6d2
+size 8535888
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/_ops.py b/build/torch29-cxx11-cu126-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..b150fa4c35414012586d1ba33daac6779a49d93a
--- /dev/null
+++ b/build/torch29-cxx11-cu126-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_a06632f  # importing the extension registers its ops with torch
+ops = torch.ops._deformable_detr_cuda_a06632f  # handle to the registered op namespace
+
+def add_op_namespace_prefix(op_name: str):  # "foo" -> "_deformable_detr_cuda_a06632f::foo"
+    """
+    Prefix op by namespace.
+    """
+    return f"_deformable_detr_cuda_a06632f::{op_name}"
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py b/build/torch29-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309
--- /dev/null
+++ b/build/torch29-cxx11-cu126-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib.util  # fix: bind the submodule explicitly; since Python 3.12 `import importlib` no longer implicitly provides importlib.util
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:  # load a module file under a unique, path-derived name
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)  # c_size_t keeps the hash non-negative
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module  # register before exec so self-imports resolve
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))  # re-export the flat package API under this nested name
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/layers.py b/build/torch29-cxx11-cu126-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch29-cxx11-cu126-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):  # autograd bridge to the compiled ms_deform_attn CUDA ops
+    @staticmethod
+    def forward(
+        context,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,  # batch chunk size used by the im2col-style kernel
+    ):
+        context.im2col_step = im2col_step  # stash non-tensor arg for backward
+        output = ops.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            context.im2col_step,
+        )
+        context.save_for_backward(  # inputs needed to compute gradients
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        )
+        return output
+
+    @staticmethod
+    @once_differentiable  # backward itself is not differentiable (no double backward)
+    def backward(context, grad_output):
+        (
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        ) = context.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output,
+            context.im2col_step,
+        )
+
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None  # None for the two index tensors and im2col_step (non-differentiable)
+
+
+class MultiScaleDeformableAttention(nn.Module):  # thin nn.Module facade over the autograd Function
+    def forward(
+        self,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_spatial_shapes_list: List[Tuple],  # unused here; kept for caller-side API compatibility
+        level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        return MultiScaleDeformableAttentionFunction.apply(
+            value,
+            value_spatial_shapes,
+            level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step,
+        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/metadata.json b/build/torch29-cxx11-cu126-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..f5902b55ab0b2b561c0cf97567c9806c60839c7f
--- /dev/null
+++ b/build/torch29-cxx11-cu126-x86_64-linux/metadata.json
@@ -0,0 +1,18 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0+PTX"
+ ]
+ }
+}
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/__init__.py b/build/torch29-cxx11-cu128-aarch64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch29-cxx11-cu128-aarch64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(  # typed Python wrapper over the registered CUDA backward op
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    grad_output: torch.Tensor,
+    im2col_step: int,
+) -> List[torch.Tensor]:  # presumably [grad_value, grad_sampling_loc, grad_attn_weight]; matches the unpacking in layers.py -- confirm
+    return ops.ms_deform_attn_backward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        grad_output,
+        im2col_step,
+    )
+
+
+def ms_deform_attn_forward(  # typed Python wrapper over the registered CUDA forward op
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    im2col_step: int,  # batch chunk size used by the im2col-style kernel
+) -> torch.Tensor:
+    return ops.ms_deform_attn_forward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        im2col_step,
+    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]  # public API of the kernel package
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/_deformable_detr_cuda_a06632f.abi3.so b/build/torch29-cxx11-cu128-aarch64-linux/_deformable_detr_cuda_a06632f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..c35d7e717a3c4a8dc01bc49ee48ebe22d0899a68
--- /dev/null
+++ b/build/torch29-cxx11-cu128-aarch64-linux/_deformable_detr_cuda_a06632f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee686cad364dc6a654fe2b43da0bf46fc1df3f3156dc1865a7e4811d811686e8
+size 11619232
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/_ops.py b/build/torch29-cxx11-cu128-aarch64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..b150fa4c35414012586d1ba33daac6779a49d93a
--- /dev/null
+++ b/build/torch29-cxx11-cu128-aarch64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_a06632f  # importing the extension registers its ops with torch
+ops = torch.ops._deformable_detr_cuda_a06632f  # handle to the registered op namespace
+
+def add_op_namespace_prefix(op_name: str):  # "foo" -> "_deformable_detr_cuda_a06632f::foo"
+    """
+    Prefix op by namespace.
+    """
+    return f"_deformable_detr_cuda_a06632f::{op_name}"
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py b/build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309
--- /dev/null
+++ b/build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib.util  # fix: bind the submodule explicitly; since Python 3.12 `import importlib` no longer implicitly provides importlib.util
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:  # load a module file under a unique, path-derived name
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)  # c_size_t keeps the hash non-negative
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module  # register before exec so self-imports resolve
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))  # re-export the flat package API under this nested name
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/layers.py b/build/torch29-cxx11-cu128-aarch64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch29-cxx11-cu128-aarch64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):  # autograd bridge to the compiled ms_deform_attn CUDA ops
+    @staticmethod
+    def forward(
+        context,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,  # batch chunk size used by the im2col-style kernel
+    ):
+        context.im2col_step = im2col_step  # stash non-tensor arg for backward
+        output = ops.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            context.im2col_step,
+        )
+        context.save_for_backward(  # inputs needed to compute gradients
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        )
+        return output
+
+    @staticmethod
+    @once_differentiable  # backward itself is not differentiable (no double backward)
+    def backward(context, grad_output):
+        (
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        ) = context.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output,
+            context.im2col_step,
+        )
+
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None  # None for the two index tensors and im2col_step (non-differentiable)
+
+
+class MultiScaleDeformableAttention(nn.Module):  # thin nn.Module facade over the autograd Function
+    def forward(
+        self,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_spatial_shapes_list: List[Tuple],  # unused here; kept for caller-side API compatibility
+        level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        return MultiScaleDeformableAttentionFunction.apply(
+            value,
+            value_spatial_shapes,
+            level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step,
+        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/metadata.json b/build/torch29-cxx11-cu128-aarch64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b796af185fbbd8594fcd846949aa5fadc0ccdda
--- /dev/null
+++ b/build/torch29-cxx11-cu128-aarch64-linux/metadata.json
@@ -0,0 +1,21 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "10.1",
+ "12.0+PTX",
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/__init__.py b/build/torch29-cxx11-cu128-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch29-cxx11-cu128-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(  # typed Python wrapper over the registered CUDA backward op
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    grad_output: torch.Tensor,
+    im2col_step: int,
+) -> List[torch.Tensor]:  # presumably [grad_value, grad_sampling_loc, grad_attn_weight]; matches the unpacking in layers.py -- confirm
+    return ops.ms_deform_attn_backward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        grad_output,
+        im2col_step,
+    )
+
+
+def ms_deform_attn_forward(  # typed Python wrapper over the registered CUDA forward op
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    im2col_step: int,  # batch chunk size used by the im2col-style kernel
+) -> torch.Tensor:
+    return ops.ms_deform_attn_forward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        im2col_step,
+    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]  # public API of the kernel package
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/_deformable_detr_cuda_a06632f.abi3.so b/build/torch29-cxx11-cu128-x86_64-linux/_deformable_detr_cuda_a06632f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..c66150f385ca1e7073cae942add6b6fceadd27d6
--- /dev/null
+++ b/build/torch29-cxx11-cu128-x86_64-linux/_deformable_detr_cuda_a06632f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c851c52dba9eb591cfd264a8764b1880149fc40a94781c1e5bba5f3702b0cc22
+size 11519264
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/_ops.py b/build/torch29-cxx11-cu128-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..b150fa4c35414012586d1ba33daac6779a49d93a
--- /dev/null
+++ b/build/torch29-cxx11-cu128-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_a06632f  # importing the extension registers its ops with torch
+ops = torch.ops._deformable_detr_cuda_a06632f  # handle to the registered op namespace
+
+def add_op_namespace_prefix(op_name: str):  # "foo" -> "_deformable_detr_cuda_a06632f::foo"
+    """
+    Prefix op by namespace.
+    """
+    return f"_deformable_detr_cuda_a06632f::{op_name}"
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py b/build/torch29-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309
--- /dev/null
+++ b/build/torch29-cxx11-cu128-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib.util  # fix: bind the submodule explicitly; since Python 3.12 `import importlib` no longer implicitly provides importlib.util
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:  # load a module file under a unique, path-derived name
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)  # c_size_t keeps the hash non-negative
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module  # register before exec so self-imports resolve
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))  # re-export the flat package API under this nested name
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/layers.py b/build/torch29-cxx11-cu128-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch29-cxx11-cu128-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):  # autograd bridge to the compiled ms_deform_attn CUDA ops
+    @staticmethod
+    def forward(
+        context,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,  # batch chunk size used by the im2col-style kernel
+    ):
+        context.im2col_step = im2col_step  # stash non-tensor arg for backward
+        output = ops.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            context.im2col_step,
+        )
+        context.save_for_backward(  # inputs needed to compute gradients
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        )
+        return output
+
+    @staticmethod
+    @once_differentiable  # backward itself is not differentiable (no double backward)
+    def backward(context, grad_output):
+        (
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        ) = context.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output,
+            context.im2col_step,
+        )
+
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None  # None for the two index tensors and im2col_step (non-differentiable)
+
+
+class MultiScaleDeformableAttention(nn.Module):  # thin nn.Module facade over the autograd Function
+    def forward(
+        self,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_spatial_shapes_list: List[Tuple],  # unused here; kept for caller-side API compatibility
+        level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        return MultiScaleDeformableAttentionFunction.apply(
+            value,
+            value_spatial_shapes,
+            level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step,
+        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/metadata.json b/build/torch29-cxx11-cu128-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b796af185fbbd8594fcd846949aa5fadc0ccdda
--- /dev/null
+++ b/build/torch29-cxx11-cu128-x86_64-linux/metadata.json
@@ -0,0 +1,21 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "10.1",
+ "12.0+PTX",
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch29-cxx11-cu129-aarch64-linux/__init__.py b/build/torch29-cxx11-cu129-aarch64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch29-cxx11-cu129-aarch64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(  # typed Python wrapper over the registered CUDA backward op
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    grad_output: torch.Tensor,
+    im2col_step: int,
+) -> List[torch.Tensor]:  # presumably [grad_value, grad_sampling_loc, grad_attn_weight]; matches the unpacking in layers.py -- confirm
+    return ops.ms_deform_attn_backward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        grad_output,
+        im2col_step,
+    )
+
+
+def ms_deform_attn_forward(  # typed Python wrapper over the registered CUDA forward op
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    im2col_step: int,  # batch chunk size used by the im2col-style kernel
+) -> torch.Tensor:
+    return ops.ms_deform_attn_forward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        im2col_step,
+    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]  # public API of the kernel package
diff --git a/build/torch29-cxx11-cu129-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch29-cxx11-cu129-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..fb87a02133150612fc282ab1009983917460c8c6
--- /dev/null
+++ b/build/torch29-cxx11-cu129-aarch64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c342be48ef1c29b9b9dbbc2a0387a452d1ce84a5db276ed67dd310bd6a6240f9
+size 11686192
diff --git a/build/torch29-cxx11-cu129-aarch64-linux/_ops.py b/build/torch29-cxx11-cu129-aarch64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch29-cxx11-cu129-aarch64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f  # importing the extension registers its ops with torch
+ops = torch.ops._deformable_detr_cuda_52e302f  # handle to the registered op namespace
+
+def add_op_namespace_prefix(op_name: str):  # "foo" -> "_deformable_detr_cuda_52e302f::foo"
+    """
+    Prefix op by namespace.
+    """
+    return f"_deformable_detr_cuda_52e302f::{op_name}"
diff --git a/build/torch29-cxx11-cu129-aarch64-linux/deformable_detr/__init__.py b/build/torch29-cxx11-cu129-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch29-cxx11-cu129-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:  # load a module file under a unique, path-derived name
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)  # c_size_t keeps the hash non-negative
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module  # register before exec so self-imports resolve
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))  # re-export the flat package API under this nested name
diff --git a/build/torch29-cxx11-cu129-aarch64-linux/layers.py b/build/torch29-cxx11-cu129-aarch64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch29-cxx11-cu129-aarch64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):  # autograd bridge to the compiled ms_deform_attn CUDA ops
+    @staticmethod
+    def forward(
+        context,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,  # batch chunk size used by the im2col-style kernel
+    ):
+        context.im2col_step = im2col_step  # stash non-tensor arg for backward
+        output = ops.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            context.im2col_step,
+        )
+        context.save_for_backward(  # inputs needed to compute gradients
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        )
+        return output
+
+    @staticmethod
+    @once_differentiable  # backward itself is not differentiable (no double backward)
+    def backward(context, grad_output):
+        (
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        ) = context.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output,
+            context.im2col_step,
+        )
+
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None  # None for the two index tensors and im2col_step (non-differentiable)
+
+
+class MultiScaleDeformableAttention(nn.Module):  # thin nn.Module facade over the autograd Function
+    def forward(
+        self,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_spatial_shapes_list: List[Tuple],  # unused here; kept for caller-side API compatibility
+        level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        return MultiScaleDeformableAttentionFunction.apply(
+            value,
+            value_spatial_shapes,
+            level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step,
+        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch29-cxx11-cu129-aarch64-linux/metadata.json b/build/torch29-cxx11-cu129-aarch64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b796af185fbbd8594fcd846949aa5fadc0ccdda
--- /dev/null
+++ b/build/torch29-cxx11-cu129-aarch64-linux/metadata.json
@@ -0,0 +1,21 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "10.1",
+ "12.0+PTX",
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch29-cxx11-cu129-x86_64-linux/__init__.py b/build/torch29-cxx11-cu129-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch29-cxx11-cu129-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
+def ms_deform_attn_backward(  # typed Python wrapper over the registered CUDA backward op
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    grad_output: torch.Tensor,
+    im2col_step: int,
+) -> List[torch.Tensor]:  # presumably [grad_value, grad_sampling_loc, grad_attn_weight]; matches the unpacking in layers.py -- confirm
+    return ops.ms_deform_attn_backward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        grad_output,
+        im2col_step,
+    )
+
+
+def ms_deform_attn_forward(  # typed Python wrapper over the registered CUDA forward op
+    value: torch.Tensor,
+    spatial_shapes: torch.Tensor,
+    level_start_index: torch.Tensor,
+    sampling_loc: torch.Tensor,
+    attn_weight: torch.Tensor,
+    im2col_step: int,  # batch chunk size used by the im2col-style kernel
+) -> torch.Tensor:
+    return ops.ms_deform_attn_forward(
+        value,
+        spatial_shapes,
+        level_start_index,
+        sampling_loc,
+        attn_weight,
+        im2col_step,
+    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]  # public API of the kernel package
diff --git a/build/torch29-cxx11-cu129-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so b/build/torch29-cxx11-cu129-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..31d4459963115d93241bd1c24a4c5ae4a1ac6a90
--- /dev/null
+++ b/build/torch29-cxx11-cu129-x86_64-linux/_deformable_detr_cuda_52e302f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a2eda9aeb3a0901e93fbb168c621575fa32616a4e7d28ce533a196bb7eb60ef
+size 11581720
diff --git a/build/torch29-cxx11-cu129-x86_64-linux/_ops.py b/build/torch29-cxx11-cu129-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8034e91357d8b44db7912f0c94210eb4f5256c
--- /dev/null
+++ b/build/torch29-cxx11-cu129-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_52e302f  # importing the extension registers its ops with torch
+ops = torch.ops._deformable_detr_cuda_52e302f  # handle to the registered op namespace
+
+def add_op_namespace_prefix(op_name: str):  # "foo" -> "_deformable_detr_cuda_52e302f::foo"
+    """
+    Prefix op by namespace.
+    """
+    return f"_deformable_detr_cuda_52e302f::{op_name}"
diff --git a/build/torch29-cxx11-cu129-x86_64-linux/deformable_detr/__init__.py b/build/torch29-cxx11-cu129-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b2672c1cd85b74c1b3ded0fc0b2100e1aeac23
--- /dev/null
+++ b/build/torch29-cxx11-cu129-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:  # load a module file under a unique, path-derived name
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)  # c_size_t keeps the hash non-negative
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module  # register before exec so self-imports resolve
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))  # re-export the flat package API under this nested name
diff --git a/build/torch29-cxx11-cu129-x86_64-linux/layers.py b/build/torch29-cxx11-cu129-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch29-cxx11-cu129-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class MultiScaleDeformableAttentionFunction(Function):  # autograd bridge to the compiled ms_deform_attn CUDA ops
+    @staticmethod
+    def forward(
+        context,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,  # batch chunk size used by the im2col-style kernel
+    ):
+        context.im2col_step = im2col_step  # stash non-tensor arg for backward
+        output = ops.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            context.im2col_step,
+        )
+        context.save_for_backward(  # inputs needed to compute gradients
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        )
+        return output
+
+    @staticmethod
+    @once_differentiable  # backward itself is not differentiable (no double backward)
+    def backward(context, grad_output):
+        (
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+        ) = context.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output,
+            context.im2col_step,
+        )
+
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None  # None for the two index tensors and im2col_step (non-differentiable)
+
+
+class MultiScaleDeformableAttention(nn.Module):  # thin nn.Module facade over the autograd Function
+    def forward(
+        self,
+        value: Tensor,
+        value_spatial_shapes: Tensor,
+        value_spatial_shapes_list: List[Tuple],  # unused here; kept for caller-side API compatibility
+        level_start_index: Tensor,
+        sampling_locations: Tensor,
+        attention_weights: Tensor,
+        im2col_step: int,
+    ):
+        return MultiScaleDeformableAttentionFunction.apply(
+            value,
+            value_spatial_shapes,
+            level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step,
+        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch29-cxx11-cu129-x86_64-linux/metadata.json b/build/torch29-cxx11-cu129-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b796af185fbbd8594fcd846949aa5fadc0ccdda
--- /dev/null
+++ b/build/torch29-cxx11-cu129-x86_64-linux/metadata.json
@@ -0,0 +1,21 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "10.1",
+ "12.0+PTX",
+ "7.0",
+ "7.2",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/__init__.py b/build/torch29-cxx11-cu130-aarch64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch29-cxx11-cu130-aarch64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Backward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_backward`` op.

    Args:
        value: Flattened multi-level feature values.
        spatial_shapes: Per-level (H, W) spatial shapes.
        level_start_index: Start offset of each level inside ``value``.
        sampling_loc: Sampling locations for each query/head/level/point.
        attn_weight: Attention weights matching ``sampling_loc``.
        grad_output: Gradient of the loss w.r.t. the forward output.
        im2col_step: Batch chunk size used by the im2col-style kernel.

    Returns:
        Gradients w.r.t. ``value``, ``sampling_loc`` and ``attn_weight``.
    """
    return ops.ms_deform_attn_backward(
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        grad_output,
        im2col_step,
    )
+
+
def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Forward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_forward`` op.

    Args:
        value: Flattened multi-level feature values.
        spatial_shapes: Per-level (H, W) spatial shapes.
        level_start_index: Start offset of each level inside ``value``.
        sampling_loc: Sampling locations for each query/head/level/point.
        attn_weight: Attention weights matching ``sampling_loc``.
        im2col_step: Batch chunk size used by the im2col-style kernel.

    Returns:
        The attention output tensor produced by the kernel.
    """
    return ops.ms_deform_attn_forward(
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        im2col_step,
    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/_deformable_detr_cuda_a06632f.abi3.so b/build/torch29-cxx11-cu130-aarch64-linux/_deformable_detr_cuda_a06632f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..2404e808024343acd3462674651dcfcdf9146404
--- /dev/null
+++ b/build/torch29-cxx11-cu130-aarch64-linux/_deformable_detr_cuda_a06632f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0ab087e15a52004500698e23cb707eaec57fa16de384ea5388e0a396318f088
+size 9888904
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/_ops.py b/build/torch29-cxx11-cu130-aarch64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..b150fa4c35414012586d1ba33daac6779a49d93a
--- /dev/null
+++ b/build/torch29-cxx11-cu130-aarch64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_a06632f
+ops = torch.ops._deformable_detr_cuda_a06632f
+
def add_op_namespace_prefix(op_name: str) -> str:
    """Return *op_name* qualified with this extension's torch op namespace.

    E.g. ``"foo"`` becomes ``"_deformable_detr_cuda_a06632f::foo"``.
    """
    return f"_deformable_detr_cuda_a06632f::{op_name}"
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py b/build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309
--- /dev/null
+++ b/build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/layers.py b/build/torch29-cxx11-cu130-aarch64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch29-cxx11-cu130-aarch64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
class MultiScaleDeformableAttentionFunction(Function):
    """Autograd bridge to the fused multi-scale deformable attention ops.

    Wraps ``ops.ms_deform_attn_forward`` / ``ops.ms_deform_attn_backward``
    so the CUDA kernel participates in PyTorch autograd.
    """

    @staticmethod
    def forward(
        ctx,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # im2col_step is a plain int, so it rides on the context object
        # rather than going through save_for_backward.
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        # saved_tensors comes back in the exact order passed to
        # save_for_backward in forward().
        saved = ctx.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved,
            grad_output,
            ctx.im2col_step,
        )
        # One slot per forward() input: the spatial-shape/index tensors and
        # im2col_step are non-differentiable, hence None.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
class MultiScaleDeformableAttention(nn.Module):
    """Thin ``nn.Module`` front-end over the fused deformable-attention op."""

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # `value_spatial_shapes_list` is not consumed here — presumably kept
        # for signature compatibility with a non-fused implementation; the
        # kernel path only needs the tensor form of the shapes.
        del value_spatial_shapes_list
        return MultiScaleDeformableAttentionFunction.apply(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/metadata.json b/build/torch29-cxx11-cu130-aarch64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..66651b7d3f95ac9e5ce5fc2a641b6f0f50788f87
--- /dev/null
+++ b/build/torch29-cxx11-cu130-aarch64-linux/metadata.json
@@ -0,0 +1,19 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "11.0",
+ "12.0+PTX",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/__init__.py b/build/torch29-cxx11-cu130-x86_64-linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33db73ca6e361af4707ba5bb5f55bf0e7c3005a4
--- /dev/null
+++ b/build/torch29-cxx11-cu130-x86_64-linux/__init__.py
@@ -0,0 +1,46 @@
+from typing import List
+import torch
+
+from ._ops import ops
+from . import layers
+
+
def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    """Backward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_backward`` op.

    Args:
        value: Flattened multi-level feature values.
        spatial_shapes: Per-level (H, W) spatial shapes.
        level_start_index: Start offset of each level inside ``value``.
        sampling_loc: Sampling locations for each query/head/level/point.
        attn_weight: Attention weights matching ``sampling_loc``.
        grad_output: Gradient of the loss w.r.t. the forward output.
        im2col_step: Batch chunk size used by the im2col-style kernel.

    Returns:
        Gradients w.r.t. ``value``, ``sampling_loc`` and ``attn_weight``.
    """
    return ops.ms_deform_attn_backward(
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        grad_output,
        im2col_step,
    )
+
+
def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    """Forward pass of multi-scale deformable attention.

    Thin pass-through to the compiled ``ms_deform_attn_forward`` op.

    Args:
        value: Flattened multi-level feature values.
        spatial_shapes: Per-level (H, W) spatial shapes.
        level_start_index: Start offset of each level inside ``value``.
        sampling_loc: Sampling locations for each query/head/level/point.
        attn_weight: Attention weights matching ``sampling_loc``.
        im2col_step: Batch chunk size used by the im2col-style kernel.

    Returns:
        The attention output tensor produced by the kernel.
    """
    return ops.ms_deform_attn_forward(
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        im2col_step,
    )
+
+
+__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/_deformable_detr_cuda_a06632f.abi3.so b/build/torch29-cxx11-cu130-x86_64-linux/_deformable_detr_cuda_a06632f.abi3.so
new file mode 100644
index 0000000000000000000000000000000000000000..6c6e678cc4f728eabb7c7458f8e574884a33d02a
--- /dev/null
+++ b/build/torch29-cxx11-cu130-x86_64-linux/_deformable_detr_cuda_a06632f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40af5d2edddffdf5c91a80083c35ceaacdcc6d08e1208bd85c78b4b533e11643
+size 9803064
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/_ops.py b/build/torch29-cxx11-cu130-x86_64-linux/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..b150fa4c35414012586d1ba33daac6779a49d93a
--- /dev/null
+++ b/build/torch29-cxx11-cu130-x86_64-linux/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _deformable_detr_cuda_a06632f
+ops = torch.ops._deformable_detr_cuda_a06632f
+
def add_op_namespace_prefix(op_name: str) -> str:
    """Return *op_name* qualified with this extension's torch op namespace.

    E.g. ``"foo"`` becomes ``"_deformable_detr_cuda_a06632f::foo"``.
    """
    return f"_deformable_detr_cuda_a06632f::{op_name}"
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/deformable_detr/__init__.py b/build/torch29-cxx11-cu130-x86_64-linux/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..03dbc1afe1cf156661a2b1b22003cd5f599a0309
--- /dev/null
+++ b/build/torch29-cxx11-cu130-x86_64-linux/deformable_detr/__init__.py
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
+ # it would also be used for other imports. So, we make a module name that
+ # depends on the path for it to be unique using the hex-encoded hash of
+ # the path.
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+ module_name = path_hash
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
+ if spec is None:
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+ module = importlib.util.module_from_spec(spec)
+ if module is None:
+ raise ImportError(f"Cannot load module {module_name} from spec")
+ sys.modules[module_name] = module
+ spec.loader.exec_module(module) # type: ignore
+ return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/layers.py b/build/torch29-cxx11-cu130-x86_64-linux/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..db94032dea3d445f27017f923ae80468e18d2d77
--- /dev/null
+++ b/build/torch29-cxx11-cu130-x86_64-linux/layers.py
@@ -0,0 +1,84 @@
+from typing import List, Union, Tuple
+
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+import torch.nn as nn
+
+from ._ops import ops
+
+
class MultiScaleDeformableAttentionFunction(Function):
    """Autograd bridge to the fused multi-scale deformable attention ops.

    Wraps ``ops.ms_deform_attn_forward`` / ``ops.ms_deform_attn_backward``
    so the CUDA kernel participates in PyTorch autograd.
    """

    @staticmethod
    def forward(
        ctx,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # im2col_step is a plain int, so it rides on the context object
        # rather than going through save_for_backward.
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        # saved_tensors comes back in the exact order passed to
        # save_for_backward in forward().
        saved = ctx.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            *saved,
            grad_output,
            ctx.im2col_step,
        )
        # One slot per forward() input: the spatial-shape/index tensors and
        # im2col_step are non-differentiable, hence None.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
class MultiScaleDeformableAttention(nn.Module):
    """Thin ``nn.Module`` front-end over the fused deformable-attention op."""

    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        # `value_spatial_shapes_list` is not consumed here — presumably kept
        # for signature compatibility with a non-fused implementation; the
        # kernel path only needs the tensor form of the shapes.
        del value_spatial_shapes_list
        return MultiScaleDeformableAttentionFunction.apply(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )
+
+
+__all__ = ["MultiScaleDeformableAttention"]
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/metadata.json b/build/torch29-cxx11-cu130-x86_64-linux/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..66651b7d3f95ac9e5ce5fc2a641b6f0f50788f87
--- /dev/null
+++ b/build/torch29-cxx11-cu130-x86_64-linux/metadata.json
@@ -0,0 +1,19 @@
+{
+ "version": 1,
+ "license": "Apache-2.0",
+ "python-depends": [],
+ "backend": {
+ "type": "cuda",
+ "archs": [
+ "10.0",
+ "11.0",
+ "12.0+PTX",
+ "7.5",
+ "8.0",
+ "8.6",
+ "8.7",
+ "8.9",
+ "9.0"
+ ]
+ }
+}
diff --git a/media/benches_dark_animation.svg b/media/benches_dark_animation.svg
new file mode 100644
index 0000000000000000000000000000000000000000..411f6cad85c072e30b9f504443ea1096b22f0220
--- /dev/null
+++ b/media/benches_dark_animation.svg
@@ -0,0 +1,33 @@
+
\ No newline at end of file
diff --git a/media/benches_dark_latency.svg b/media/benches_dark_latency.svg
new file mode 100644
index 0000000000000000000000000000000000000000..c1f463ef72d80d5b8477b379f82accdf599015c9
--- /dev/null
+++ b/media/benches_dark_latency.svg
@@ -0,0 +1,1921 @@
+
+
+
diff --git a/media/benches_dark_throughput.svg b/media/benches_dark_throughput.svg
new file mode 100644
index 0000000000000000000000000000000000000000..26dbf3c0760c7b4502f78405af99a3d6df45f404
--- /dev/null
+++ b/media/benches_dark_throughput.svg
@@ -0,0 +1,2154 @@
+
+
+
diff --git a/media/benches_light_animation.svg b/media/benches_light_animation.svg
new file mode 100644
index 0000000000000000000000000000000000000000..74f121c92532dc73ef37916304299fd966d659c2
--- /dev/null
+++ b/media/benches_light_animation.svg
@@ -0,0 +1,33 @@
+
\ No newline at end of file
diff --git a/media/benches_light_latency.svg b/media/benches_light_latency.svg
new file mode 100644
index 0000000000000000000000000000000000000000..763ad698f173bcb853787f4c0cb5ec3e9e89b728
--- /dev/null
+++ b/media/benches_light_latency.svg
@@ -0,0 +1,1921 @@
+
+
+
diff --git a/media/benches_light_throughput.svg b/media/benches_light_throughput.svg
new file mode 100644
index 0000000000000000000000000000000000000000..68b3e709deb0708192f4d89ab695f987cc14ee04
--- /dev/null
+++ b/media/benches_light_throughput.svg
@@ -0,0 +1,2154 @@
+
+
+