| """ |
| HAT (Hybrid Attention Transformer) main model implementation. |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import math |
|
|
| from .components import ( |
| RHAG, PatchEmbed, PatchUnEmbed, Upsample, |
| trunc_normal_, window_partition, to_2tuple |
| ) |
|
|
| class HAT(nn.Module): |
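    r"""Hybrid Attention Transformer (HAT).

    A PyTorch implementation of `Activating More Pixels in Image
    Super-Resolution Transformer`. The overall layout follows SwinIR.

    Args:
        img_size (int): Input image size. Default: 64.
        patch_size (int): Patch size. Default: 1.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Patch embedding dimension. Default: 96.
        depths (tuple[int]): Number of blocks in each RHAG. Default: (6, 6, 6, 6).
        num_heads (tuple[int]): Number of attention heads per group. Default: (6, 6, 6, 6).
        window_size (int): Window size for (shifted) window attention. Default: 7.
        compress_ratio (int): Channel compression ratio of the channel attention block (CAB). Default: 3.
        squeeze_factor (int): Squeeze factor of the CAB. Default: 30.
        conv_scale (float): Weight of the CAB branch added to window attention. Default: 0.01.
        overlap_ratio (float): Window enlargement ratio for overlapping cross-attention. Default: 0.5.
        mlp_ratio (float): Ratio of MLP hidden dim to embedding dim. Default: 4.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True.
        qk_scale (float): Override the default qk scale of head_dim ** -0.5 if set. Default: None.
        drop_rate (float): Dropout rate. Default: 0.
        attn_drop_rate (float): Attention dropout rate. Default: 0.
        drop_path_rate (float): Stochastic depth rate. Default: 0.1.
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add an absolute position embedding. Default: False.
        patch_norm (bool): If True, normalize after patch embedding. Default: True.
        use_checkpoint (bool): Use gradient checkpointing to save memory. Default: False.
        upscale (int): Upscale factor, e.g. 2/3/4 for super-resolution. Default: 2.
        img_range (float): Image value range, 1. or 255. Default: 1.
        upsampler (str): Reconstruction module, 'pixelshuffle' or ''. Default: ''.
        resi_connection (str): Conv block before the residual connection, '1conv' or 'identity'. Default: '1conv'.
    """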
| def __init__(self, img_size=64, patch_size=1, in_chans=3, embed_dim=96, depths=(6, 6, 6, 6), |
| num_heads=(6, 6, 6, 6), window_size=7, compress_ratio=3, squeeze_factor=30, |
| conv_scale=0.01, overlap_ratio=0.5, mlp_ratio=4., qkv_bias=True, qk_scale=None, |
| drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, |
| ape=False, patch_norm=True, use_checkpoint=False, upscale=2, img_range=1., |
| upsampler='', resi_connection='1conv', **kwargs): |
| super(HAT, self).__init__() |
|
|
| self.window_size = window_size |
| self.shift_size = window_size // 2 |
| self.overlap_ratio = overlap_ratio |
|
|
| num_in_ch = in_chans |
| num_out_ch = in_chans |
| num_feat = 64 |
| self.img_range = img_range |
| if in_chans == 3: |
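            # RGB mean for the DIV2K dataset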
| rgb_mean = (0.4488, 0.4371, 0.4040) |
| self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1) |
| else: |
| self.mean = torch.zeros(1, 1, 1, 1) |
| self.upscale = upscale |
| self.upsampler = upsampler |
|
|
| |
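        # pre-compute the relative position indices for window self-attention (SA)
        # and overlapping cross-attention (OCA); registering them as buffers lets
        # them follow the model across devices without being trained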
| relative_position_index_SA = self.calculate_rpi_sa() |
| relative_position_index_OCA = self.calculate_rpi_oca() |
| self.register_buffer('relative_position_index_SA', relative_position_index_SA) |
| self.register_buffer('relative_position_index_OCA', relative_position_index_OCA) |
|
|
| |
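        # ------------------------- 1, shallow feature extraction ------------------------- #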
| self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1) |
|
|
| |
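        # ------------------------- 2, deep feature extraction ------------------------- #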
| self.num_layers = len(depths) |
| self.embed_dim = embed_dim |
| self.ape = ape |
| self.patch_norm = patch_norm |
| self.num_features = embed_dim |
| self.mlp_ratio = mlp_ratio |
|
|
| |
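        # split image into non-overlapping patches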
| self.patch_embed = PatchEmbed( |
| img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim, |
| norm_layer=norm_layer if self.patch_norm else None) |
| num_patches = self.patch_embed.num_patches |
| patches_resolution = self.patch_embed.patches_resolution |
| self.patches_resolution = patches_resolution |
|
|
| |
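        # merge non-overlapping patches back into an image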
| self.patch_unembed = PatchUnEmbed( |
| img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim, |
| norm_layer=norm_layer if self.patch_norm else None) |
|
|
| |
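        # absolute position embedding (optional, disabled by default)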
| if self.ape: |
| self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) |
| trunc_normal_(self.absolute_pos_embed, std=.02) |
|
|
| self.pos_drop = nn.Dropout(p=drop_rate) |
|
|
| |
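        # stochastic depth decay rule: per-block drop-path rates grow linearly up to drop_path_rate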
| dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] |
|
|
| |
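        # build the Residual Hybrid Attention Groups (RHAGs)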
| self.layers = nn.ModuleList() |
| for i_layer in range(self.num_layers): |
| layer = RHAG( |
| dim=embed_dim, |
| input_resolution=(patches_resolution[0], patches_resolution[1]), |
| depth=depths[i_layer], |
| num_heads=num_heads[i_layer], |
| window_size=window_size, |
| compress_ratio=compress_ratio, |
| squeeze_factor=squeeze_factor, |
| conv_scale=conv_scale, |
| overlap_ratio=overlap_ratio, |
| mlp_ratio=self.mlp_ratio, |
| qkv_bias=qkv_bias, |
| qk_scale=qk_scale, |
| drop=drop_rate, |
| attn_drop=attn_drop_rate, |
| drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], |
| norm_layer=norm_layer, |
| downsample=None, |
| use_checkpoint=use_checkpoint, |
| img_size=img_size, |
| patch_size=patch_size, |
| resi_connection=resi_connection) |
| self.layers.append(layer) |
| self.norm = norm_layer(self.num_features) |
|
|
| |
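        # conv layer that closes the deep feature extraction body before the global residual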
| if resi_connection == '1conv': |
| self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1) |
| elif resi_connection == 'identity': |
| self.conv_after_body = nn.Identity() |
|
|
| |
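        # ------------------------- 3, high quality image reconstruction ------------------------- #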
| if self.upsampler == 'pixelshuffle': |
| self.conv_before_upsample = nn.Sequential( |
| nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)) |
| self.upsample = Upsample(upscale, num_feat) |
| self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) |
|
|
| self.apply(self._init_weights) |
|
|
| def _init_weights(self, m): |
| if isinstance(m, nn.Linear): |
| trunc_normal_(m.weight, std=.02) |
| if isinstance(m, nn.Linear) and m.bias is not None: |
| nn.init.constant_(m.bias, 0) |
| elif isinstance(m, nn.LayerNorm): |
| nn.init.constant_(m.bias, 0) |
| nn.init.constant_(m.weight, 1.0) |
|
|
| def calculate_rpi_sa(self): |
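        # pair-wise relative positions of all tokens inside one window; used to
        # index the learnable relative position bias table (Swin-style)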
| coords_h = torch.arange(self.window_size) |
| coords_w = torch.arange(self.window_size) |
| coords = torch.stack(torch.meshgrid([coords_h, coords_w])) |
| coords_flatten = torch.flatten(coords, 1) |
| relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] |
| relative_coords = relative_coords.permute(1, 2, 0).contiguous() |
| relative_coords[:, :, 0] += self.window_size - 1 |
| relative_coords[:, :, 1] += self.window_size - 1 |
| relative_coords[:, :, 0] *= 2 * self.window_size - 1 |
| relative_position_index = relative_coords.sum(-1) |
| return relative_position_index |
|
|
| def calculate_rpi_oca(self): |
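        # relative positions between the ws x ws query window and the enlarged
        # key/value window of size ws + int(overlap_ratio * ws) used by OCA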
| window_size_ori = self.window_size |
| window_size_ext = self.window_size + int(self.overlap_ratio * self.window_size) |
|
|
| coords_h = torch.arange(window_size_ori) |
| coords_w = torch.arange(window_size_ori) |
| coords_ori = torch.stack(torch.meshgrid([coords_h, coords_w])) |
| coords_ori_flatten = torch.flatten(coords_ori, 1) |
|
|
| coords_h = torch.arange(window_size_ext) |
| coords_w = torch.arange(window_size_ext) |
| coords_ext = torch.stack(torch.meshgrid([coords_h, coords_w])) |
| coords_ext_flatten = torch.flatten(coords_ext, 1) |
|
|
| relative_coords = coords_ext_flatten[:, None, :] - coords_ori_flatten[:, :, None] |
| relative_coords = relative_coords.permute(1, 2, 0).contiguous() |
        relative_coords[:, :, 0] += window_size_ori - 1  # shift to start from 0
        relative_coords[:, :, 1] += window_size_ori - 1
| relative_coords[:, :, 0] *= window_size_ori + window_size_ext - 1 |
| relative_position_index = relative_coords.sum(-1) |
| return relative_position_index |
|
|
| def calculate_mask(self, x_size): |
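        # attention mask for shifted-window self-attention (SW-MSA): tokens that
        # come from different image regions within one shifted window must not
        # attend to each other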
| h, w = x_size |
| img_mask = torch.zeros((1, h, w, 1)) |
        h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        cnt = 0
        for h_slice in h_slices:
            for w_slice in w_slices:
                img_mask[:, h_slice, w_slice, :] = cnt
                cnt += 1
|
|
| mask_windows = window_partition(img_mask, self.window_size) |
| mask_windows = mask_windows.view(-1, self.window_size * self.window_size) |
| attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) |
| attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) |
| return attn_mask |
|
|
| @torch.jit.ignore |
| def no_weight_decay(self): |
| return {'absolute_pos_embed'} |
|
|
| @torch.jit.ignore |
| def no_weight_decay_keywords(self): |
| return {'relative_position_bias_table'} |
|
|
| def forward_features(self, x): |
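        # deep feature extraction: patch embed -> RHAG groups -> norm -> unembed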
| x_size = (x.shape[2], x.shape[3]) |
|
|
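        # the SW-MSA mask depends on the input size, so it is computed per call
        # and handed to every block together with the precomputed position indices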
| attn_mask = self.calculate_mask(x_size).to(x.device) |
        params = {
            'attn_mask': attn_mask,
            'rpi_sa': self.relative_position_index_SA,
            'rpi_oca': self.relative_position_index_OCA
        }
|
|
| x = self.patch_embed(x) |
| if self.ape: |
| x = x + self.absolute_pos_embed |
| x = self.pos_drop(x) |
|
|
| for layer in self.layers: |
| x = layer(x, x_size, params) |
|
|
| x = self.norm(x) |
| x = self.patch_unembed(x, x_size) |
| return x |
|
|
| def forward(self, x): |
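        # normalize: subtract the channel mean and scale to the training value range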
| self.mean = self.mean.type_as(x) |
| x = (x - self.mean) * self.img_range |
|
|
| if self.upsampler == 'pixelshuffle': |
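            # for classical SR with pixel-shuffle reconstruction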
| x = self.conv_first(x) |
| x = self.conv_after_body(self.forward_features(x)) + x |
| x = self.conv_before_upsample(x) |
| x = self.conv_last(self.upsample(x)) |
|
|
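        # undo the normalization applied at the start of forward()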
| x = x / self.img_range + self.mean |
| return x |
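

if __name__ == '__main__':
    # Minimal smoke test (a sketch, not part of the released model): build a
    # small HAT and run one forward pass. It assumes the sibling `components`
    # module provides the blocks imported above, and that the input spatial
    # size is a multiple of `window_size` so window partitioning is exact.
    upscale = 2
    window_size = 7
    height = width = window_size * 8  # 56 x 56 input
    model = HAT(
        img_size=height,
        window_size=window_size,
        upscale=upscale,
        upsampler='pixelshuffle',
        depths=(2, 2),
        num_heads=(2, 2),
        embed_dim=60)
    x = torch.randn(1, 3, height, width)
    with torch.no_grad():
        y = model(x)
    print(y.shape)  # expected: torch.Size([1, 3, 112, 112])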