from functools import partial

import torch
import torch.nn as nn

import timm.models.vision_transformer


class VisionTransformer(timm.models.vision_transformer.VisionTransformer):
| """ Vision Transformer with support for global average pooling |
| """ |
| def __init__(self, global_pool=False, **kwargs): |
| super(VisionTransformer, self).__init__(**kwargs) |
|
|
| self.global_pool = global_pool |
| if self.global_pool: |
| norm_layer = kwargs['norm_layer'] |
| embed_dim = kwargs['embed_dim'] |
| self.fc_norm = norm_layer(embed_dim) |
|
|
| del self.norm |
|
|
| def forward_features(self, x): |
        B = x.shape[0]
        x = self.patch_embed(x)

        # Prepend the class token and add positional embeddings.
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)

        for blk in self.blocks:
            x = blk(x)

        if self.global_pool:
            # Global average pool over patch tokens, excluding the class token.
            x = x[:, 1:, :].mean(dim=1)
            outcome = self.fc_norm(x)
        else:
            # Default ViT behavior: final norm, then take the class token.
            x = self.norm(x)
            outcome = x[:, 0]

        return outcome


def vit_base_patch16(**kwargs):
    model = VisionTransformer(
        num_classes=0,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model


def vit_large_patch16(**kwargs):
    model = VisionTransformer(
        num_classes=0,
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model


def vit_huge_patch14(**kwargs):
    model = VisionTransformer(
        num_classes=0,  # no classifier head by default, matching the other factories
        patch_size=14, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model
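

# Minimal usage sketch (added for illustration; assumes a timm version whose
# VisionTransformer accepts these kwargs, e.g. the timm==0.3.2 pin common in
# MAE-era code, and 224x224 inputs for the patch-16 models):
if __name__ == "__main__":
    model = vit_base_patch16(global_pool=True)
    imgs = torch.randn(2, 3, 224, 224)  # dummy batch of two RGB images
    feats = model.forward_features(imgs)
    print(feats.shape)  # with global_pool=True: torch.Size([2, 768])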