| """Save CTransPath model in TorchScript format. |
| |
| Adapted from https://github.com/Xiyue-Wang/TransPath |
| |
| Licensed GPL 3.0. |
| """ |
|
|
import sys

# CTransPath requires the modified timm 0.5.4 distributed with the TransPath
# repository; put it on sys.path before importing timm so the patched copy
# is imported instead of any installed version.
sys.path.append("timm-0.5.4/")

import timm
from timm.models.layers.helpers import to_2tuple
import torch
import torch.nn as nn

assert timm.__version__ == "0.5.4"


class ConvStem(nn.Module):
    """Convolutional patch-embedding stem used by CTransPath in place of
    the Swin transformer's default patch embedding."""

    def __init__(
        self,
        img_size=224,
        patch_size=4,
        in_chans=3,
        embed_dim=768,
        norm_layer=None,
        flatten=True,
    ):
        super().__init__()

        assert patch_size == 4
        assert embed_dim % 8 == 0

        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten

        # Two stride-2 conv blocks downsample by a factor of 4 (matching
        # patch_size), doubling the channel width each time; a final 1x1
        # conv projects to the embedding dimension.
        stem = []
        input_dim, output_dim = in_chans, embed_dim // 8
        for _ in range(2):
            stem.append(
                nn.Conv2d(
                    input_dim,
                    output_dim,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=False,
                )
            )
            stem.append(nn.BatchNorm2d(output_dim))
            stem.append(nn.ReLU(inplace=True))
            input_dim = output_dim
            output_dim *= 2
        stem.append(nn.Conv2d(input_dim, embed_dim, kernel_size=1))
        self.proj = nn.Sequential(*stem)

        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        assert (
            H == self.img_size[0] and W == self.img_size[1]
        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        if self.flatten:
            # (B, C, H, W) -> (B, H*W, C), the token layout Swin expects.
            x = x.flatten(2).transpose(1, 2)
        x = self.norm(x)
        return x
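

# Quick shape check (an added sketch, not part of the original script): with
# the defaults, the stem maps a 224x224 RGB batch to 56 * 56 = 3136 tokens
# of width embed_dim = 768.
_stem = ConvStem()
assert _stem(torch.ones(1, 3, 224, 224)).shape == (1, 3136, 768)

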
def ctranspath():
    model = timm.create_model(
        "swin_tiny_patch4_window7_224", embed_layer=ConvStem, pretrained=False
    )
    return model


model = ctranspath()
# Drop the classification head; the model then outputs 768-d embeddings.
model.head = torch.nn.Identity()
td = torch.load("ctranspath.pth", map_location="cpu")
model.load_state_dict(td["model"], strict=True)
model.eval()  # freeze BatchNorm statistics before scripting/export

jitted = torch.jit.script(model)
torch.jit.save(jitted, "torchscript_model.pt")
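
# Sanity check (an added sketch, not part of the original script): reload the
# scripted model and confirm it matches the eager model on a dummy input.
reloaded = torch.jit.load("torchscript_model.pt")
reloaded.eval()
with torch.inference_mode():
    dummy = torch.ones(1, 3, 224, 224)
    torch.testing.assert_close(reloaded(dummy), model(dummy))

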
# Export the same weights to ONNX, keeping the batch dimension dynamic.
torch.onnx.export(
    model,
    args=torch.ones(1, 3, 224, 224),
    f="model.onnx",
    input_names=["image"],
    output_names=["embedding"],
    dynamic_axes={
        "image": {0: "batch_size"},
        "embedding": {0: "batch_size"},
    },
)
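
# Optional check (a sketch assuming onnxruntime is installed; not part of the
# original script): run the exported graph and compare its output with the
# PyTorch model on the same input.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
dummy_np = np.ones((1, 3, 224, 224), dtype=np.float32)
(onnx_embedding,) = sess.run(["embedding"], {"image": dummy_np})
with torch.inference_mode():
    torch_embedding = model(torch.from_numpy(dummy_np)).numpy()
np.testing.assert_allclose(onnx_embedding, torch_embedding, rtol=1e-3, atol=1e-5)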