import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import repeat
import collections.abc
import math
import warnings
import random

from torch.nn.init import _calculate_fan_in_and_fan_out
import torch.utils.checkpoint as checkpoint

from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from einops import rearrange


'''
Feature Fusion for Variable-Length Data Processing
AFF/iAFF is adapted from https://github.com/YimianDai/open-aff/blob/master/aff_pytorch/aff_net/fusion.py
According to the paper: Yimian Dai et al., "Attentional Feature Fusion", IEEE Winter Conference on Applications of Computer Vision (WACV), 2021
'''


class DAF(nn.Module):
    '''
    Direct addition fusion (DirectAddFuse)
    '''

    def __init__(self):
        super(DAF, self).__init__()

    def forward(self, x, residual):
        return x + residual


class iAFF(nn.Module):
    '''
    Iterative attentional feature fusion (iAFF)
    '''

    def __init__(self, channels=64, r=4, type='2D'):
        super(iAFF, self).__init__()
        inter_channels = int(channels // r)

        if type == '1D':
            # first stage: local (pointwise) channel attention
            self.local_att = nn.Sequential(
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )
            # first stage: global channel attention over the pooled signal
            self.global_att = nn.Sequential(
                nn.AdaptiveAvgPool1d(1),
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )
            # second (iterative) attention stage
            self.local_att2 = nn.Sequential(
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )
            self.global_att2 = nn.Sequential(
                nn.AdaptiveAvgPool1d(1),
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )
        elif type == '2D':
            # first stage: local (pointwise) channel attention
            self.local_att = nn.Sequential(
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )
            # first stage: global channel attention over the pooled map
            self.global_att = nn.Sequential(
                nn.AdaptiveAvgPool2d(1),
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )
            # second (iterative) attention stage
            self.local_att2 = nn.Sequential(
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )
            self.global_att2 = nn.Sequential(
                nn.AdaptiveAvgPool2d(1),
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )
        else:
            raise ValueError(f'fusion type {type} is not supported')

        self.sigmoid = nn.Sigmoid()

    def forward(self, x, residual):
        flag = False
        xa = x + residual
        # BatchNorm cannot normalize a single sample in training mode, so
        # temporarily duplicate the batch and keep only the first output.
        if xa.size(0) == 1:
            xa = torch.cat([xa, xa], dim=0)
            flag = True
        xl = self.local_att(xa)
        xg = self.global_att(xa)
        xlg = xl + xg
        wei = self.sigmoid(xlg)
        xi = x * wei + residual * (1 - wei)

        xl2 = self.local_att2(xi)
        xg2 = self.global_att2(xi)
        xlg2 = xl2 + xg2
        wei2 = self.sigmoid(xlg2)
        xo = x * wei2 + residual * (1 - wei2)
        if flag:
            xo = xo[0].unsqueeze(0)
        return xo


class AFF(nn.Module):
    '''
    Attentional feature fusion (AFF)
    '''

    def __init__(self, channels=64, r=4, type='2D'):
        super(AFF, self).__init__()
        inter_channels = int(channels // r)

        if type == '1D':
            # local (pointwise) channel attention
            self.local_att = nn.Sequential(
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )
            # global channel attention over the pooled signal
            self.global_att = nn.Sequential(
                nn.AdaptiveAvgPool1d(1),
                nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm1d(channels),
            )
        elif type == '2D':
            self.local_att = nn.Sequential(
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )
            self.global_att = nn.Sequential(
                nn.AdaptiveAvgPool2d(1),
                nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(inter_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(channels),
            )
        else:
            raise ValueError(f'fusion type {type} is not supported')

        self.sigmoid = nn.Sigmoid()

    def forward(self, x, residual):
        flag = False
        xa = x + residual
        # BatchNorm cannot normalize a single sample in training mode, so
        # temporarily duplicate the batch and keep only the first output.
        if xa.size(0) == 1:
            xa = torch.cat([xa, xa], dim=0)
            flag = True
        xl = self.local_att(xa)
        xg = self.global_att(xa)
        xlg = xl + xg
        wei = self.sigmoid(xlg)
        xo = 2 * x * wei + 2 * residual * (1 - wei)
        if flag:
            xo = xo[0].unsqueeze(0)
        return xo
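

# Minimal usage sketch (assumed shapes, not part of the original file): all
# three fusion modules take a main input and a residual of identical shape
# and return a fused tensor of the same shape.
#
#   fuse = AFF(channels=96, r=4, type='2D')      # or iAFF(...) / DAF()
#   x = torch.randn(2, 96, 64, 64)               # (B, C, H, W)
#   residual = torch.randn(2, 96, 64, 64)
#   out = fuse(x, residual)                      # -> (2, 96, 64, 64)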


def interpolate(x, ratio):
    """Interpolate data in the time domain. This is used to compensate for
    the resolution reduction caused by downsampling in a CNN.

    Args:
        x: (batch_size, time_steps, classes_num)
        ratio: int, interpolation ratio
    Returns:
        upsampled: (batch_size, time_steps * ratio, classes_num)
    """
    (batch_size, time_steps, classes_num) = x.shape
    upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
    upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
    return upsampled
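

# Example (sketch): each time step is repeated `ratio` times, so a
# (2, 100, 527) framewise prediction becomes (2, 400, 527) with ratio=4:
#
#   y = interpolate(torch.randn(2, 100, 527), ratio=4)   # -> (2, 400, 527)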


def do_mixup(x, mixup_lambda):
    """Mix each sample with the sample at the mirrored batch position.

    Args:
        x: (batch_size, ...)
        mixup_lambda: (batch_size,)
    Returns:
        out: (batch_size, ...)
    """
    out = (
        x.transpose(0, -1) * mixup_lambda
        + torch.flip(x, dims=[0]).transpose(0, -1) * (1 - mixup_lambda)
    ).transpose(0, -1)
    return out
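

# Example (sketch): with a batch of 4 and per-sample lambdas, sample i is
# blended with sample (batch_size - 1 - i):
#
#   lam = torch.tensor([0.7, 0.7, 0.3, 0.3])
#   mixed = do_mixup(torch.randn(4, 64, 1001), lam)   # same shape as input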


def _ntuple(n):
    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return x
        return tuple(repeat(x, n))
    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple


def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).
    This is the same as the DropConnect impl I created for EfficientNet, etc. networks; however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dims
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    output = x.div(keep_prob) * random_tensor
    return output
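

# Example (sketch): during training each sample survives with probability
# 1 - drop_prob and is rescaled by 1 / keep_prob so the expectation is unchanged:
#
#   out = drop_path(torch.ones(8, 16), drop_prob=0.25, training=True)
#   # each row is either all zeros or all 4/3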


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)


class PatchEmbed(nn.Module):
    """2D Image to Patch Embedding"""

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None,
                 flatten=True, patch_stride=16, enable_fusion=False, fusion_type='None'):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patch_stride = to_2tuple(patch_stride)
        self.img_size = img_size
        self.patch_size = patch_size
        self.patch_stride = patch_stride
        self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten
        self.in_chans = in_chans
        self.embed_dim = embed_dim

        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type

        padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2)

        if (self.enable_fusion) and (self.fusion_type == 'channel_map'):
            self.proj = nn.Conv2d(in_chans * 4, embed_dim, kernel_size=patch_size, stride=patch_stride, padding=padding)
        else:
            self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_stride, padding=padding)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

        if (self.enable_fusion) and (self.fusion_type in ['daf_2d', 'aff_2d', 'iaff_2d']):
            # the local (mel) branch covers 3x the time span per patch
            self.mel_conv2d = nn.Conv2d(in_chans, embed_dim,
                                        kernel_size=(patch_size[0], patch_size[1] * 3),
                                        stride=(patch_stride[0], patch_stride[1] * 3),
                                        padding=padding)
            if self.fusion_type == 'daf_2d':
                self.fusion_model = DAF()
            elif self.fusion_type == 'aff_2d':
                self.fusion_model = AFF(channels=embed_dim, type='2D')
            elif self.fusion_type == 'iaff_2d':
                self.fusion_model = iAFF(channels=embed_dim, type='2D')

    def forward(self, x, longer_idx=None):
        if (self.enable_fusion) and (self.fusion_type in ['daf_2d', 'aff_2d', 'iaff_2d']):
            # global branch: embed the first (global) channel
            global_x = x[:, 0:1, :, :]

            B, C, H, W = global_x.shape
            assert H == self.img_size[0] and W == self.img_size[1], \
                f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
            global_x = self.proj(global_x)
            TW = global_x.size(-1)
            if len(longer_idx) > 0:
                # local branch: embed the remaining channels of the longer samples
                local_x = x[longer_idx, 1:, :, :].contiguous()
                B, C, H, W = local_x.shape
                local_x = local_x.view(B * C, 1, H, W)
                local_x = self.mel_conv2d(local_x)
                local_x = local_x.view(B, C, local_x.size(1), local_x.size(2), local_x.size(3))
                local_x = local_x.permute((0, 2, 3, 1, 4)).contiguous().flatten(3)
                TB, TC, TH, _ = local_x.size()
                # pad or crop the local branch to the global time width
                if local_x.size(-1) < TW:
                    local_x = torch.cat([local_x, torch.zeros((TB, TC, TH, TW - local_x.size(-1)), device=global_x.device)], dim=-1)
                else:
                    local_x = local_x[:, :, :, :TW]

                global_x[longer_idx] = self.fusion_model(global_x[longer_idx], local_x)
            x = global_x
        else:
            B, C, H, W = x.shape
            assert H == self.img_size[0] and W == self.img_size[1], \
                f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
            x = self.proj(x)

        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x
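

# Minimal usage sketch (assumed values, fusion disabled): a (256, 256)
# spectrogram "image" with a 4x4 patch grid yields 64*64 = 4096 tokens.
#
#   pe = PatchEmbed(img_size=256, patch_size=4, in_chans=1, embed_dim=96, patch_stride=4)
#   tokens = pe(torch.randn(2, 1, 256, 256))   # -> (2, 4096, 96)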


class Mlp(nn.Module):
    """MLP as used in Vision Transformer, MLP-Mixer and related networks"""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def _no_grad_trunc_normal_(tensor, mean, std, a, b):

    def norm_cdf(x):
        # standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Values are generated by drawing from a truncated uniform distribution
        # and then applying the inverse CDF of the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [2l-1, 2u-1]
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use the inverse cdf transform to get a truncated standard normal
        tensor.erfinv_()

        # Transform to the requested mean and std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure the values are in the proper range
        tensor.clamp_(min=a, max=b)
    return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.
    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)


def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        raise ValueError(f"invalid mode {mode}")

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of a standard normal truncated to (-2, 2)
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")


def lecun_normal_(tensor):
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')


def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size
    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x
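

# Round-trip sketch: window_reverse undoes window_partition, so the two
# functions satisfy the identity below for any H, W divisible by window_size.
#
#   x = torch.randn(2, 16, 16, 32)                        # (B, H, W, C)
#   w = window_partition(x, 8)                            # -> (2*2*2, 8, 8, 32)
#   assert torch.equal(window_reverse(w, 8, 16, 16), x)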


class WindowAttention(nn.Module):
    r"""Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # (Wh, Ww)
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # parameter table of relative position biases: (2*Wh-1)*(2*Ww-1), nH
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))

        # pair-wise relative position index for each token pair inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww, Wh*Ww, nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn

    def extra_repr(self):
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'


class SwinTransformerBlock(nn.Module):
    r"""Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm, norm_before_mlp='ln'):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        self.norm_before_mlp = norm_before_mlp
        if min(self.input_resolution) <= self.window_size:
            # if the window is larger than the input resolution, don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        if self.norm_before_mlp == 'ln':
            self.norm2 = nn.LayerNorm(dim)
        elif self.norm_before_mlp == 'bn':
            # register the BatchNorm once so its parameters are actually trained;
            # it normalizes the channel dimension, hence the transposes
            self.norm2_bn = nn.BatchNorm1d(dim)
            self.norm2 = lambda x: self.norm2_bn(x.transpose(1, 2)).transpose(1, 2)
        else:
            raise NotImplementedError
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if self.shift_size > 0:
            # calculate attention mask for SW-MSA
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1

            mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None

        self.register_buffer("attn_mask", attn_mask)

    def forward(self, x):
        H, W = self.input_resolution
        B, L, C = x.shape

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA
        attn_windows, attn = self.attn(x_windows, mask=self.attn_mask)

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)

        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x, attn

    def extra_repr(self):
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"


class PatchMerging(nn.Module):
    r"""Patch Merging Layer.
    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."

        x = x.view(B, H, W, C)

        # gather each 2x2 neighborhood and concatenate along channels
        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C

        x = self.norm(x)
        x = self.reduction(x)

        return x

    def extra_repr(self):
        return f"input_resolution={self.input_resolution}, dim={self.dim}"


class BasicLayer(nn.Module):
    """A basic Swin Transformer layer for one stage.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
                 norm_before_mlp='ln'):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks; even blocks use W-MSA, odd blocks use SW-MSA
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                 norm_layer=norm_layer, norm_before_mlp=norm_before_mlp)
            for i in range(depth)])

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x):
        attns = []
        for blk in self.blocks:
            if self.use_checkpoint:
                x, attn = checkpoint.checkpoint(blk, x)
            else:
                x, attn = blk(x)
            if not self.training:
                attns.append(attn.unsqueeze(0))
        if self.downsample is not None:
            x = self.downsample(x)
        if not self.training:
            # average the attention maps over all blocks for inspection
            attn = torch.cat(attns, dim=0)
            attn = torch.mean(attn, dim=0)
        return x, attn

    def extra_repr(self):
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"


class HTSAT_Swin_Transformer(nn.Module):
    r"""HTSAT based on the Swin Transformer
    Args:
        spec_size (int | tuple(int)): Input spectrogram size. Default: 256
        patch_size (int | tuple(int)): Patch size. Default: 4
        patch_stride (int | tuple(int)): Patch stride for the frequency and time axes. Default: (4, 4)
        in_chans (int): Number of input image channels. Default: 1 (mono)
        num_classes (int): Number of classes for the classification head. Default: 527
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each HTSAT-Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 8
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        config (module): The configuration module from config.py
    """

    def __init__(self, spec_size=256, patch_size=4, patch_stride=(4, 4),
                 in_chans=1, num_classes=527,
                 embed_dim=96, depths=[2, 2, 6, 2], num_heads=[4, 8, 16, 32],
                 window_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm,
                 ape=False, patch_norm=True,
                 use_checkpoint=False, norm_before_mlp='ln', config=None,
                 enable_fusion=False, fusion_type='None', **kwargs):
        super(HTSAT_Swin_Transformer, self).__init__()

        self.config = config
        self.spec_size = spec_size
        self.patch_stride = patch_stride
        self.patch_size = patch_size
        self.window_size = window_size
        self.embed_dim = embed_dim
        self.depths = depths
        self.ape = ape
        self.in_chans = in_chans
        self.num_classes = num_classes
        self.num_heads = num_heads
        self.num_layers = len(self.depths)
        self.num_features = int(self.embed_dim * 2 ** (self.num_layers - 1))

        self.drop_rate = drop_rate
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate

        self.qkv_bias = qkv_bias
        self.qk_scale = qk_scale

        self.patch_norm = patch_norm
        self.norm_layer = norm_layer if self.patch_norm else None
        self.norm_before_mlp = norm_before_mlp
        self.mlp_ratio = mlp_ratio

        self.use_checkpoint = use_checkpoint

        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type

        # the time axis is later cut into freq_ratio chunks stacked along frequency
        self.freq_ratio = self.spec_size // self.config.mel_bins
        window = 'hann'
        center = True
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        self.interpolate_ratio = 32  # downsampled ratio

        # spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=config.window_size, hop_length=config.hop_size,
                                                 win_length=config.window_size, window=window, center=center,
                                                 pad_mode=pad_mode, freeze_parameters=True)
        # logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=config.sample_rate, n_fft=config.window_size,
                                                 n_mels=config.mel_bins, fmin=config.fmin, fmax=config.fmax,
                                                 ref=ref, amin=amin, top_db=top_db, freeze_parameters=True)
        # spec augmenter
        self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
                                               freq_drop_width=8, freq_stripes_num=2)
        self.bn0 = nn.BatchNorm2d(self.config.mel_bins)

        # split the spectrogram into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=self.spec_size, patch_size=self.patch_size, in_chans=self.in_chans,
            embed_dim=self.embed_dim, norm_layer=self.norm_layer, patch_stride=patch_stride,
            enable_fusion=self.enable_fusion, fusion_type=self.fusion_type
        )

        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.grid_size
        self.patches_resolution = patches_resolution

        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, self.embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=self.drop_rate)

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, sum(self.depths))]

        # build layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(dim=int(self.embed_dim * 2 ** i_layer),
                               input_resolution=(patches_resolution[0] // (2 ** i_layer),
                                                 patches_resolution[1] // (2 ** i_layer)),
                               depth=self.depths[i_layer],
                               num_heads=self.num_heads[i_layer],
                               window_size=self.window_size,
                               mlp_ratio=self.mlp_ratio,
                               qkv_bias=self.qkv_bias, qk_scale=self.qk_scale,
                               drop=self.drop_rate, attn_drop=self.attn_drop_rate,
                               drop_path=dpr[sum(self.depths[:i_layer]):sum(self.depths[:i_layer + 1])],
                               norm_layer=self.norm_layer,
                               downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                               use_checkpoint=use_checkpoint,
                               norm_before_mlp=self.norm_before_mlp)
            self.layers.append(layer)

        self.norm = self.norm_layer(self.num_features)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.maxpool = nn.AdaptiveMaxPool1d(1)

        # token-semantic module: maps the final feature map to per-class framewise logits
        SF = self.spec_size // (2 ** (len(self.depths) - 1)) // self.patch_stride[0] // self.freq_ratio
        self.tscam_conv = nn.Conv2d(
            in_channels=self.num_features,
            out_channels=self.num_classes,
            kernel_size=(SF, 3),
            padding=(0, 1)
        )
        self.head = nn.Linear(num_classes, num_classes)

        if (self.enable_fusion) and (self.fusion_type in ['daf_1d', 'aff_1d', 'iaff_1d']):
            self.mel_conv1d = nn.Sequential(
                nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
                nn.BatchNorm1d(64)
            )
            if self.fusion_type == 'daf_1d':
                self.fusion_model = DAF()
            elif self.fusion_type == 'aff_1d':
                self.fusion_model = AFF(channels=64, type='1D')
            elif self.fusion_type == 'iaff_1d':
                self.fusion_model = iAFF(channels=64, type='1D')

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    def forward_features(self, x, longer_idx=None):
        # x: (B, 1, T, F) reshaped mel spectrogram "image"
        frames_num = x.shape[2]
        x = self.patch_embed(x, longer_idx=longer_idx)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for i, layer in enumerate(self.layers):
            x, attn = layer(x)

        x = self.norm(x)
        B, N, C = x.shape
        SF = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
        ST = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]
        x = x.permute(0, 2, 1).contiguous().reshape(B, C, SF, ST)
        B, C, F, T = x.shape
        # group 2D CNN features back into the original time-frequency layout
        c_freq_bin = F // self.freq_ratio
        x = x.reshape(B, C, F // c_freq_bin, c_freq_bin, T)
        x = x.permute(0, 1, 3, 2, 4).contiguous().reshape(B, C, c_freq_bin, -1)

        # get latent outputs
        fine_grained_latent_output = torch.mean(x, dim=2)
        fine_grained_latent_output = interpolate(fine_grained_latent_output.permute(0, 2, 1).contiguous(),
                                                 8 * self.patch_stride[1])

        latent_output = self.avgpool(torch.flatten(x, 2))
        latent_output = torch.flatten(latent_output, 1)

        x = self.tscam_conv(x)
        x = torch.flatten(x, 2)  # B, C, T

        fpx = interpolate(torch.sigmoid(x).permute(0, 2, 1).contiguous(), 8 * self.patch_stride[1])

        x = self.avgpool(x)
        x = torch.flatten(x, 1)

        output_dict = {
            'framewise_output': fpx,  # already passed through sigmoid
            'clipwise_output': torch.sigmoid(x),
            'fine_grained_embedding': fine_grained_latent_output,
            'embedding': latent_output
        }

        return output_dict

    def crop_wav(self, x, crop_size, spe_pos=None):
        time_steps = x.shape[2]
        tx = torch.zeros(x.shape[0], x.shape[1], crop_size, x.shape[3]).to(x.device)
        for i in range(len(x)):
            if spe_pos is None:
                crop_pos = random.randint(0, time_steps - crop_size - 1)
            else:
                crop_pos = spe_pos
            tx[i][0] = x[i, 0, crop_pos:crop_pos + crop_size, :]
        return tx

    # Reshape the (B, C, T, F) mel spectrogram into the square spec_size x spec_size
    # "image" expected by the Swin backbone: the time axis is cut into freq_ratio
    # chunks that are stacked along the frequency axis.
    def reshape_wav2img(self, x):
        B, C, T, F = x.shape
        target_T = int(self.spec_size * self.freq_ratio)
        target_F = self.spec_size // self.freq_ratio
        assert T <= target_T and F <= target_F, "the wav size should be less than or equal to the swin input size"
        # to avoid bicubic zero error
        if T < target_T:
            x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True)
        if F < target_F:
            x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True)
        x = x.permute(0, 1, 3, 2).contiguous()
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2], self.freq_ratio, x.shape[3] // self.freq_ratio)
        x = x.permute(0, 1, 3, 2, 4).contiguous()
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3], x.shape[4])
        return x
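
    # Shape walkthrough (sketch, using the default tiny setup: spec_size=256,
    # mel_bins=64, hence freq_ratio=4): a (B, 1, 1024, 64) mel spectrogram is
    # cut into 4 time chunks of 256 frames, which are stacked along frequency:
    #
    #   (B, 1, 1024, 64) --permute--> (B, 1, 64, 1024)
    #                    --reshape--> (B, 1, 64, 4, 256)
    #                    --permute--> (B, 1, 4, 64, 256)
    #                    --reshape--> (B, 1, 256, 256)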

    # Crop a spec_size-long time window at cur_pos and tile it 4x along the
    # frequency axis.
    def repeat_wat2img(self, x, cur_pos):
        B, C, T, F = x.shape
        target_T = int(self.spec_size * self.freq_ratio)
        target_F = self.spec_size // self.freq_ratio
        assert T <= target_T and F <= target_F, "the wav size should be less than or equal to the swin input size"
        # to avoid bicubic zero error
        if T < target_T:
            x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True)
        if F < target_F:
            x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True)
        x = x.permute(0, 1, 3, 2).contiguous()  # B C F T
        x = x[:, :, :, cur_pos:cur_pos + self.spec_size]
        x = x.repeat(repeats=(1, 1, 4, 1))
        return x

    def forward_generator(self, x: torch.Tensor, mixup_lambda=None, infer_mode=False, device=None):
        # split the waveform batch into 480000-sample chunks (10 s, assuming 48 kHz audio)
        n = int(x.shape[1] / 480000)
        assert n * 480000 == x.shape[1]
        x = rearrange(x, 'b (n t) -> (b n) t', n=n)
        if not self.enable_fusion:
            x = x.to(device=device, non_blocking=True)
            x = self.spectrogram_extractor(x)  # (batch_size, 1, time_steps, freq_bins)
            x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
            x = x.transpose(1, 3)
            x = self.bn0(x)
            x = x.transpose(1, 3)
            if self.training:
                x = self.spec_augmenter(x)

            if self.training and mixup_lambda is not None:
                x = do_mixup(x, mixup_lambda)

            x = self.reshape_wav2img(x)

        longer_idx = None
        frames_num = x.shape[2]
        x = self.patch_embed(x, longer_idx=longer_idx)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for i, layer in enumerate(self.layers[:3]):
            if i == 2:
                # yield after every block of the third stage; the caller may
                # send a modified tensor back to continue the forward pass
                for blk in layer.blocks:
                    x, attn = blk(x)
                    x = rearrange(x, '(b n) t c -> b (n t) c', n=n)
                    x = x if (new_x := (yield x)) is None else new_x
                    x = rearrange(x, 'b (n t) c -> (b n) t c', n=n)
            else:
                x, attn = layer(x)

    def forward(self, x: torch.Tensor, mixup_lambda=None, infer_mode=False, device=None):
        # split the waveform batch into 480000-sample chunks (10 s, assuming 48 kHz audio)
        n = int(x.shape[1] / 480000)
        assert n * 480000 == x.shape[1]
        x = rearrange(x, 'b (n t) -> (b n) t', n=n)
        if not self.enable_fusion:
            x = x.to(device=device, non_blocking=True)
            x = self.spectrogram_extractor(x)  # (batch_size, 1, time_steps, freq_bins)
            x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
            x = x.transpose(1, 3)
            x = self.bn0(x)
            x = x.transpose(1, 3)
            if self.training:
                x = self.spec_augmenter(x)

            if self.training and mixup_lambda is not None:
                x = do_mixup(x, mixup_lambda)

            x = self.reshape_wav2img(x)

        longer_idx = None
        frames_num = x.shape[2]
        x = self.patch_embed(x, longer_idx=longer_idx)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for i, layer in enumerate(self.layers):
            x, attn = layer(x)

        x = self.norm(x)
        # merge the per-chunk token sequences back into one sequence per sample
        x = rearrange(x, '(b n) t c -> b (n t) c', n=n)
        return x


def create_htsat_model(audio_cfg, enable_fusion=False, fusion_type='None'):
    try:
        assert audio_cfg.model_name in ["tiny", "base", "large"], \
            f"unsupported HTS-AT model name: {audio_cfg.model_name}"
        if audio_cfg.model_name == "tiny":
            model = HTSAT_Swin_Transformer(
                spec_size=256,
                patch_size=4,
                patch_stride=(4, 4),
                num_classes=audio_cfg.class_num,
                embed_dim=96,
                depths=[2, 2, 6, 2],
                num_heads=[4, 8, 16, 32],
                window_size=8,
                config=audio_cfg,
                enable_fusion=enable_fusion,
                fusion_type=fusion_type
            )
        elif audio_cfg.model_name == "base":
            model = HTSAT_Swin_Transformer(
                spec_size=256,
                patch_size=4,
                patch_stride=(4, 4),
                num_classes=audio_cfg.class_num,
                embed_dim=128,
                depths=[2, 2, 12, 2],
                num_heads=[4, 8, 16, 32],
                window_size=8,
                config=audio_cfg,
                enable_fusion=enable_fusion,
                fusion_type=fusion_type
            )
        elif audio_cfg.model_name == "large":
            model = HTSAT_Swin_Transformer(
                spec_size=256,
                patch_size=4,
                patch_stride=(4, 4),
                num_classes=audio_cfg.class_num,
                embed_dim=256,
                depths=[2, 2, 12, 2],
                num_heads=[4, 8, 16, 32],
                window_size=8,
                config=audio_cfg,
                enable_fusion=enable_fusion,
                fusion_type=fusion_type
            )

        return model
    except Exception as e:
        raise RuntimeError(
            f'Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough.'
        ) from e
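

# Minimal usage sketch (hypothetical config values; the real fields come from
# the project's audio config module): mel_bins=64 with spec_size=256 gives
# freq_ratio=4, and forward() expects waveforms whose length is a multiple
# of 480000 samples.
#
#   from types import SimpleNamespace
#   audio_cfg = SimpleNamespace(model_name="tiny", class_num=527, sample_rate=48000,
#                               window_size=1024, hop_size=480, mel_bins=64,
#                               fmin=50, fmax=14000)
#   model = create_htsat_model(audio_cfg).eval()
#   with torch.no_grad():
#       feats = model(torch.randn(1, 480000), device="cpu")   # -> (1, num_tokens, 768)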