import math

import torch
import torch.nn as nn
import torch.nn.functional as F


__all__ = ['MobileNetV3', 'mobilenet_v3_large', 'mobilenet_v3_small']


def conv_bn(inp, oup, stride, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
    # 3x3 conv -> BN -> non-linearity; used as the stem of the network.
    return nn.Sequential(
        conv_layer(inp, oup, 3, stride, 1, bias=False),
        norm_layer(oup),
        nlin_layer(inplace=True)
    )


def conv_1x1_bn(inp, oup, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
    # 1x1 conv -> BN -> non-linearity; used as the final feature-mixing layer.
    return nn.Sequential(
        conv_layer(inp, oup, 1, 1, 0, bias=False),
        norm_layer(oup),
        nlin_layer(inplace=True)
    )


class Hswish(nn.Module):
    # Hard swish: x * ReLU6(x + 3) / 6, a cheap piecewise-linear approximation of swish.
    def __init__(self, inplace=True):
        super(Hswish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return x * F.relu6(x + 3., inplace=self.inplace) / 6.


class Hsigmoid(nn.Module):
    # Hard sigmoid: ReLU6(x + 3) / 6, a piecewise-linear approximation of the sigmoid.
    def __init__(self, inplace=True):
        super(Hsigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return F.relu6(x + 3., inplace=self.inplace) / 6.


class SEModule(nn.Module):
    # Squeeze-and-excitation block: global-average-pool to a per-channel
    # descriptor, pass it through a small bottleneck MLP, then rescale the
    # input channels by the resulting (0, 1) weights.
    def __init__(self, channel, reduction=4):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            Hsigmoid()
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y.expand_as(x)


class Identity(nn.Module):
    # No-op stand-in for SEModule; `channel` is accepted (and ignored) so that
    # both classes can be constructed with the same signature.
    def __init__(self, channel):
        super(Identity, self).__init__()

    def forward(self, x):
        return x


def make_divisible(x, divisible_by=8):
    # Round up to the nearest multiple of `divisible_by`,
    # e.g. make_divisible(17) == 24. math.ceil avoids pulling in numpy
    # for a single scalar operation.
    return int(math.ceil(x / divisible_by) * divisible_by)


class MobileBottleneck(nn.Module):
    def __init__(self, inp, oup, kernel, stride, exp, se=False, nl='RE'):
        super(MobileBottleneck, self).__init__()
        assert stride in [1, 2]
        assert kernel in [3, 5]
        padding = (kernel - 1) // 2
        self.use_res_connect = stride == 1 and inp == oup

        conv_layer = nn.Conv2d
        norm_layer = nn.BatchNorm2d
        if nl == 'RE':
            nlin_layer = nn.ReLU
        elif nl == 'HS':
            nlin_layer = Hswish
        else:
            raise NotImplementedError
        if se:
            SELayer = SEModule
        else:
            SELayer = Identity

        self.conv = nn.Sequential(
            # pointwise expansion
            conv_layer(inp, exp, 1, 1, 0, bias=False),
            norm_layer(exp),
            nlin_layer(inplace=True),
            # depthwise convolution
            conv_layer(exp, exp, kernel, stride, padding, groups=exp, bias=False),
            norm_layer(exp),
            SELayer(exp),
            nlin_layer(inplace=True),
            # pointwise projection (linear, no non-linearity)
            conv_layer(exp, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        )

    def forward(self, x):
        # Residual connection only when the block preserves shape.
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)
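

# For reference, one block from the 'large' table below,
# MobileBottleneck(16, 24, 3, 2, 64, se=False, nl='RE'), expands 16 -> 64
# channels pointwise, applies a stride-2 3x3 depthwise conv, and projects
# down to 24 channels; no residual is added since shape changes.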


class MobileNetV3(nn.Module):
    def __init__(self, n_class=1000, input_size=224, dropout=0.8, mode='small', width_mult=1.0):
        super(MobileNetV3, self).__init__()
        input_channel = 16
        last_channel = 1280
        if mode == 'large':
            # k, exp, c, se, nl, s  (kernel, expansion size, output channels,
            # squeeze-and-excite, non-linearity, stride)
            mobile_setting = [
                [3, 16,  16,  False, 'RE', 1],
                [3, 64,  24,  False, 'RE', 2],
                [3, 72,  24,  False, 'RE', 1],
                [5, 72,  40,  True,  'RE', 2],
                [5, 120, 40,  True,  'RE', 1],
                [5, 120, 40,  True,  'RE', 1],
                [3, 240, 80,  False, 'HS', 2],
                [3, 200, 80,  False, 'HS', 1],
                [3, 184, 80,  False, 'HS', 1],
                [3, 184, 80,  False, 'HS', 1],
                [3, 480, 112, True,  'HS', 1],
                [3, 672, 112, True,  'HS', 1],
                [5, 672, 160, True,  'HS', 2],
                [5, 960, 160, True,  'HS', 1],
                [5, 960, 160, True,  'HS', 1],
            ]
        elif mode == 'small':
            # k, exp, c, se, nl, s
            mobile_setting = [
                [3, 16,  16,  True,  'RE', 2],
                [3, 72,  24,  False, 'RE', 2],
                [3, 88,  24,  False, 'RE', 1],
                [5, 96,  40,  True,  'HS', 2],
                [5, 240, 40,  True,  'HS', 1],
                [5, 240, 40,  True,  'HS', 1],
                [5, 120, 48,  True,  'HS', 1],
                [5, 144, 48,  True,  'HS', 1],
                [5, 288, 96,  True,  'HS', 2],
                [5, 576, 96,  True,  'HS', 1],
                [5, 576, 96,  True,  'HS', 1],
            ]
        else:
            raise NotImplementedError

        # building first layer
        assert input_size % 32 == 0
        last_channel = make_divisible(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = nn.ModuleList([conv_bn(3, input_channel, 2, nlin_layer=Hswish)])

        # building mobile blocks
        for k, exp, c, se, nl, s in mobile_setting:
            output_channel = make_divisible(c * width_mult)
            exp_channel = make_divisible(exp * width_mult)
            self.features.append(MobileBottleneck(input_channel, output_channel, k, s, exp_channel, se, nl))
            input_channel = output_channel

        # building last several layers
        if mode == 'large':
            last_conv = make_divisible(960 * width_mult)
            self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish))
            self.features.append(nn.AdaptiveAvgPool2d(1))
            self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
            self.features.append(Hswish(inplace=True))
        elif mode == 'small':
            last_conv = make_divisible(576 * width_mult)
            self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish))
            self.features.append(nn.AdaptiveAvgPool2d(1))
            self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
            self.features.append(Hswish(inplace=True))
        else:
            raise NotImplementedError

        # Stage indices of the stride-4/8/16/32 feature maps returned by
        # forward(): the last block at each stride, plus the final 1x1 conv.
        # The 'large' indices match the original hard-coded loop; the 'small'
        # ones are inferred from the block strides in mobile_setting.
        if mode == 'large':
            self.feature_stages = (3, 6, 12, 16)
        else:
            self.feature_stages = (1, 3, 8, 12)

        # building classifier (unused by forward(), but kept so that
        # classification checkpoints still load with strict=True)
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Linear(last_channel, n_class),
        )

        self._initialize_weights()

    def forward(self, x):
        # Return the stride-4/8/16/32 feature maps (x2..x5) so the network can
        # serve as a detection/segmentation backbone; the pooling, final conv
        # and classifier head are skipped.
        outs = []
        for stage in range(self.feature_stages[-1] + 1):
            x = self.features[stage](x)
            if stage in self.feature_stages:
                outs.append(x)
        x2, x3, x4, x5 = outs
        return x2, x3, x4, x5

    def _initialize_weights(self):
        # Kaiming init for convs, unit scale/zero shift for BN,
        # small normal init for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)


def mobilenet_v3_large(pretrained=False, **kwargs):
    model = MobileNetV3(mode='large', **kwargs)
    if pretrained:
        # Expects a local checkpoint file; adjust the path as needed.
        state_dict = torch.load('mobilenetv3_large.pth.tar')
        model.load_state_dict(state_dict, strict=True)
    return model


def mobilenet_v3_small(pretrained=False, **kwargs):
    model = MobileNetV3(mode='small', **kwargs)
    if pretrained:
        # Expects a local checkpoint file; adjust the path as needed.
        state_dict = torch.load('mobilenetv3_small_67.4.pth.tar')
        model.load_state_dict(state_dict, strict=True)
    return model
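

# Minimal smoke test: an illustrative sketch, assuming a 224x224 RGB input
# (any input whose sides are divisible by 32 works). Builds both variants
# with random weights and prints the shapes of the four feature maps.
if __name__ == '__main__':
    for builder in (mobilenet_v3_small, mobilenet_v3_large):
        net = builder(pretrained=False)
        net.eval()
        with torch.no_grad():
            feats = net(torch.randn(1, 3, 224, 224))
        # Expect strides 4, 8, 16, 32: spatial sizes 56, 28, 14, 7 for a 224 input.
        print(builder.__name__, [tuple(f.shape) for f in feats])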