import math
import torch
import torch.nn as nn
from functools import partial, reduce
from operator import mul

from timm.layers.helpers import to_2tuple
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.vision_transformer import PatchEmbed

__all__ = [
    'vit_small',
    'vit_base',
    'vit_large',
    'vit_conv_small',
    'vit_conv_base',
]

def patchify_avg(input_tensor, patch_size):
    """Split a 4D tensor into non-overlapping patches and average each patch.

    Returns a tensor of shape (batch_size, num_patches, channels), where each
    entry is the per-channel mean of one patch.
    """
    if input_tensor.dim() != 4:
        raise ValueError("Input tensor must be 4D (batch_size, channels, height, width)")

    batch_size, channels, height, width = input_tensor.shape

    patch_height, patch_width = patch_size, patch_size
    if height % patch_height != 0 or width % patch_width != 0:
        raise ValueError("Input tensor dimensions must be divisible by patch_size")

    # unfold -> (B, C, H/ph, W/pw, ph, pw): one slice per non-overlapping patch
    patches = input_tensor.unfold(2, patch_height, patch_height).unfold(3, patch_width, patch_width)
    # average over the spatial extent of each patch -> (B, C, num_patches)
    patches = patches.contiguous().view(
        batch_size, channels, -1, patch_height, patch_width
    ).mean(dim=-1).mean(dim=-1)
    # (B, num_patches, C)
    patches = patches.permute(0, 2, 1).contiguous()

    return patches
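

# A minimal usage sketch (not part of the original file): the tensor sizes
# below are illustrative assumptions, chosen to make the shapes concrete.
def _demo_patchify_avg():
    x = torch.randn(2, 3, 224, 224)            # (B, C, H, W)
    patches = patchify_avg(x, patch_size=16)   # (B, (224 // 16) ** 2, C)
    assert patches.shape == (2, 196, 3)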


class VisionTransformerMoCo(VisionTransformer):
    def __init__(self, stop_grad_conv1=False, **kwargs):
        super().__init__(**kwargs)
        # use a fixed 2D sin-cos position embedding instead of the learned one
        self.build_2d_sincos_position_embedding()

        # weight initialization
        for name, m in self.named_modules():
            if isinstance(m, nn.Linear):
                if 'qkv' in name:
                    # treat the weights of Q, K, V separately
                    val = math.sqrt(6. / float(m.weight.shape[0] // 3 + m.weight.shape[1]))
                    nn.init.uniform_(m.weight, -val, val)
                else:
                    nn.init.xavier_uniform_(m.weight)
                nn.init.zeros_(m.bias)
        nn.init.normal_(self.cls_token, std=1e-6)

        if isinstance(self.patch_embed, PatchEmbed):
            # xavier_uniform initialization for the patch projection
            val = math.sqrt(6. / float(3 * reduce(mul, self.patch_embed.patch_size, 1) + self.embed_dim))
            nn.init.uniform_(self.patch_embed.proj.weight, -val, val)
            nn.init.zeros_(self.patch_embed.proj.bias)

            if stop_grad_conv1:
                self.patch_embed.proj.weight.requires_grad = False
                self.patch_embed.proj.bias.requires_grad = False

    def build_2d_sincos_position_embedding(self, temperature=10000.):
        h = self.patch_embed.img_size[0] // self.patch_embed.patch_size[0]
        w = self.patch_embed.img_size[1] // self.patch_embed.patch_size[1]
        grid_w = torch.arange(w, dtype=torch.float32)
        grid_h = torch.arange(h, dtype=torch.float32)
        grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing='ij')
        assert self.embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
        pos_dim = self.embed_dim // 4
        omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
        omega = 1. / (temperature ** omega)
        out_w = torch.einsum('m,d->md', grid_w.flatten(), omega)
        out_h = torch.einsum('m,d->md', grid_h.flatten(), omega)
        pos_emb = torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], dim=1)[None, :, :]

        # prepend a zero embedding for the class token and freeze the result
        pe_token = torch.zeros([1, 1, self.embed_dim], dtype=torch.float32)
        self.pos_embed = nn.Parameter(torch.cat([pe_token, pos_emb], dim=1))
        self.pos_embed.requires_grad = False

    def forward_diffusion_output(self, x):
        # x is a (B, embed_dim, h, w) feature map; flatten it to a token sequence
        x = x.reshape(*x.shape[0:2], -1).permute(0, 2, 1)
        x = self._pos_embed(x)
        x = self.patch_drop(x)
        x = self.norm_pre(x)
        x = self.blocks(x)
        x = self.norm(x)
        return x
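

# A minimal sanity-check sketch (not part of the original file). It assumes a
# recent timm version where VisionTransformer exposes _pos_embed, patch_drop
# and norm_pre, which forward_diffusion_output relies on; the hyper-parameters
# below are illustrative.
def _demo_vision_transformer_moco():
    model = VisionTransformerMoCo(
        img_size=224, patch_size=16, embed_dim=384, depth=1, num_heads=12)
    # the sin-cos position embedding is frozen: (1, 1 + 14 * 14, embed_dim)
    assert not model.pos_embed.requires_grad
    assert model.pos_embed.shape == (1, 197, 384)
    # forward_diffusion_output consumes a pre-embedded (B, embed_dim, h, w) map
    feat = torch.randn(2, 384, 14, 14)
    tokens = model.forward_diffusion_output(feat)
    assert tokens.shape == (2, 197, 384)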


class ConvStem(nn.Module):
    """
    ConvStem, from "Early Convolutions Help Transformers See Better", Xiao et al. https://arxiv.org/abs/2106.14881
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):
        super().__init__()

        assert patch_size == 16, 'ConvStem only supports patch size of 16'
        assert embed_dim % 8 == 0, 'Embed dimension must be divisible by 8 for ConvStem'

        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten

        # four stride-2 conv-BN-ReLU stages (overall stride 16), then a 1x1
        # conv projecting to embed_dim
        stem = []
        input_dim, output_dim = 3, embed_dim // 8
        for l in range(4):
            stem.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=2, padding=1, bias=False))
            stem.append(nn.BatchNorm2d(output_dim))
            stem.append(nn.ReLU(inplace=True))
            input_dim = output_dim
            output_dim *= 2
        stem.append(nn.Conv2d(input_dim, embed_dim, kernel_size=1))
        self.proj = nn.Sequential(*stem)

        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # (B, C, H', W') -> (B, N, C)
        x = self.norm(x)
        return x
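

# A minimal usage sketch (not part of the original file); the input resolution
# below is an illustrative assumption.
def _demo_conv_stem():
    stem = ConvStem(img_size=224, patch_size=16, embed_dim=768)
    x = torch.randn(2, 3, 224, 224)
    tokens = stem(x)  # four stride-2 convs give overall stride 16 -> 14 * 14 tokens
    assert tokens.shape == (2, 196, 768)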


def vit_small(**kwargs):
    model = VisionTransformerMoCo(
        img_size=256,
        patch_size=16, embed_dim=384, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


def vit_base(**kwargs):
    model = VisionTransformerMoCo(
        img_size=256,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


def vit_large(**kwargs):
    model = VisionTransformerMoCo(
        img_size=256,
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model
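

# A minimal factory usage sketch (not part of the original file). The token
# count assumes timm's default class-token layout; num_classes=0 is a standard
# timm kwarg that replaces the classification head with an identity.
def _demo_vit_small():
    model = vit_small(num_classes=0)
    x = torch.randn(1, 3, 256, 256)
    feats = model.forward_features(x)  # 256 / 16 = 16 -> 16 * 16 patches + cls token
    assert feats.shape == (1, 257, 384)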


def vit_conv_small(**kwargs):
    # minus one ViT block, to keep compute comparable after adding the ConvStem
    model = VisionTransformerMoCo(
        patch_size=16, embed_dim=384, depth=11, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), embed_layer=ConvStem, **kwargs)
    model.default_cfg = _cfg()
    return model


def vit_conv_base(**kwargs):
    # minus one ViT block, to keep compute comparable after adding the ConvStem
    model = VisionTransformerMoCo(
        patch_size=16, embed_dim=768, depth=11, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), embed_layer=ConvStem, **kwargs)
    model.default_cfg = _cfg()
    return model


def build_mlp(num_layers, input_dim, mlp_dim, output_dim, last_bn=True):
    mlp = []
    for l in range(num_layers):
        dim1 = input_dim if l == 0 else mlp_dim
        dim2 = output_dim if l == num_layers - 1 else mlp_dim

        mlp.append(nn.Linear(dim1, dim2, bias=False))

        if l < num_layers - 1:
            mlp.append(nn.BatchNorm1d(dim2))
            mlp.append(nn.ReLU(inplace=True))
        elif last_bn:
            # follow SimCLR's design: BN on the last layer, without the
            # learnable affine parameters
            mlp.append(nn.BatchNorm1d(dim2, affine=False))

    return nn.Sequential(*mlp)
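

# A minimal usage sketch (not part of the original file): a 3-layer projection
# head with an illustrative 2048-d hidden width and 256-d output.
def _demo_build_mlp():
    head = build_mlp(num_layers=3, input_dim=384, mlp_dim=2048, output_dim=256)
    z = head(torch.randn(4, 384))  # BatchNorm1d needs a batch size > 1 in training mode
    assert z.shape == (4, 256)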