'''
MobileNetV3 in PyTorch.

Paper: "Searching for MobileNetV3"
Reference: https://arxiv.org/abs/1905.02244

Key features:
1. Network architecture discovered with NAS (neural architecture search)
2. Improved Squeeze-and-Excitation (SE) attention modules
3. h-swish activation function
4. Redesigned final layers of the network
5. Two variants: Large and Small
'''
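# For reference (from the paper): h-swish(x) = x * ReLU6(x + 3) / 6 and
# h-sigmoid(x) = ReLU6(x + 3) / 6. nn.Hardswish and nn.Hardsigmoid used
# below implement these piecewise-linear approximations of swish/sigmoid.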
import torch
import torch.nn as nn
import torch.nn.functional as F
def get_activation(name):
    '''Return the requested activation function.

    Args:
        name: activation name ('relu' or 'hardswish')
    '''
if name == 'relu':
return nn.ReLU(inplace=True)
elif name == 'hardswish':
return nn.Hardswish(inplace=True)
else:
        raise NotImplementedError(f'Unsupported activation: {name}')
class SEModule(nn.Module):
    '''Squeeze-and-Excitation module.

    Learns per-channel attention weights with global average pooling
    followed by a two-layer fully connected network.

    Args:
        channel: number of input channels
        reduction: channel reduction ratio
    '''
def __init__(self, channel, reduction=4):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Hardsigmoid(inplace=True)
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c) # squeeze
y = self.fc(y).view(b, c, 1, 1) # excitation
return x * y.expand_as(x) # scale
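# Illustrative shape walk-through for SEModule (not part of the original
# file): for x of shape (B, C, H, W), avg_pool gives (B, C, 1, 1), view ->
# (B, C), the FC stack keeps (B, C), and the final view gives (B, C, 1, 1)
# so the attention weights broadcast over H and W during the multiply.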
class Bottleneck(nn.Module):
    '''MobileNetV3 bottleneck block.

    Consists of:
    1. Expansion layer (1x1 conv)
    2. Depthwise layer (3x3 or 5x5 depthwise conv)
    3. SE module (optional)
    4. Projection layer (1x1 conv)

    Args:
        in_channels: number of input channels
        exp_channels: number of expansion channels
        out_channels: number of output channels
        kernel_size: depthwise conv kernel size
        stride: depthwise conv stride
        use_SE: whether to use the SE module
        activation: activation type ('relu' or 'hardswish')
        use_residual: whether to allow a residual connection
    '''
def __init__(self, in_channels, exp_channels, out_channels, kernel_size,
stride, use_SE, activation, use_residual=True):
super(Bottleneck, self).__init__()
self.use_residual = use_residual and stride == 1 and in_channels == out_channels
padding = (kernel_size - 1) // 2
layers = []
# Expansion layer
if exp_channels != in_channels:
layers.extend([
nn.Conv2d(in_channels, exp_channels, 1, bias=False),
nn.BatchNorm2d(exp_channels),
get_activation(activation)
])
# Depthwise conv
layers.extend([
nn.Conv2d(
exp_channels, exp_channels, kernel_size,
stride, padding, groups=exp_channels, bias=False
),
nn.BatchNorm2d(exp_channels),
get_activation(activation)
])
# SE module
if use_SE:
layers.append(SEModule(exp_channels))
# Projection layer
layers.extend([
nn.Conv2d(exp_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_residual:
return x + self.conv(x)
else:
return self.conv(x)
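# Illustrative usage of Bottleneck (a minimal sketch, not part of the
# original file): a stride-1 block whose input and output channel counts
# match takes the residual path, so the output shape equals the input.
def _bottleneck_shape_check():
    blk = Bottleneck(16, 72, 16, kernel_size=3, stride=1,
                     use_SE=True, activation='relu')
    x = torch.randn(1, 16, 32, 32)
    assert blk(x).shape == x.shape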
class MobileNetV3(nn.Module):
    '''MobileNetV3 network.

    Args:
        num_classes: number of output classes
        mode: 'large' or 'small', selects the network variant
    '''
def __init__(self, num_classes=10, mode='small'):
super(MobileNetV3, self).__init__()
if mode == 'large':
            # MobileNetV3-Large architecture
self.config = [
# k, exp, out, SE, activation, stride
[3, 16, 16, False, 'relu', 1],
[3, 64, 24, False, 'relu', 2],
[3, 72, 24, False, 'relu', 1],
[5, 72, 40, True, 'relu', 2],
[5, 120, 40, True, 'relu', 1],
[5, 120, 40, True, 'relu', 1],
[3, 240, 80, False, 'hardswish', 2],
[3, 200, 80, False, 'hardswish', 1],
[3, 184, 80, False, 'hardswish', 1],
[3, 184, 80, False, 'hardswish', 1],
[3, 480, 112, True, 'hardswish', 1],
[3, 672, 112, True, 'hardswish', 1],
[5, 672, 160, True, 'hardswish', 2],
[5, 960, 160, True, 'hardswish', 1],
[5, 960, 160, True, 'hardswish', 1],
]
init_conv_out = 16
final_conv_out = 960
else:
            # MobileNetV3-Small architecture
self.config = [
# k, exp, out, SE, activation, stride
[3, 16, 16, True, 'relu', 2],
[3, 72, 24, False, 'relu', 2],
[3, 88, 24, False, 'relu', 1],
[5, 96, 40, True, 'hardswish', 2],
[5, 240, 40, True, 'hardswish', 1],
[5, 240, 40, True, 'hardswish', 1],
[5, 120, 48, True, 'hardswish', 1],
[5, 144, 48, True, 'hardswish', 1],
[5, 288, 96, True, 'hardswish', 2],
[5, 576, 96, True, 'hardswish', 1],
[5, 576, 96, True, 'hardswish', 1],
]
init_conv_out = 16
final_conv_out = 576
        # Stem: first convolution layer
self.conv_stem = nn.Sequential(
nn.Conv2d(3, init_conv_out, 3, 2, 1, bias=False),
nn.BatchNorm2d(init_conv_out),
get_activation('hardswish')
)
        # Build the bottleneck stages
features = []
in_channels = init_conv_out
for k, exp, out, se, activation, stride in self.config:
features.append(
Bottleneck(in_channels, exp, out, k, stride, se, activation)
)
in_channels = out
self.features = nn.Sequential(*features)
        # Final convolution layer
self.conv_head = nn.Sequential(
nn.Conv2d(in_channels, final_conv_out, 1, bias=False),
nn.BatchNorm2d(final_conv_out),
get_activation('hardswish')
)
        # Classifier
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Sequential(
nn.Linear(final_conv_out, num_classes)
)
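        # Note: the paper's head adds a 1280-d (Large) / 1024-d (Small)
        # hidden layer and dropout before the final linear layer; this
        # implementation maps the pooled features directly to num_classes,
        # a simplification suited to small inputs such as CIFAR.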
        # Initialize weights
self._initialize_weights()
def _initialize_weights(self):
        '''Initialize model weights.'''
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
def forward(self, x):
x = self.conv_stem(x)
x = self.features(x)
x = self.conv_head(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def test():
"""测试函数"""
    # Test the Large variant
net_large = MobileNetV3(mode='large')
x = torch.randn(2, 3, 32, 32)
y = net_large(x)
print('Large output size:', y.size())
    # Test the Small variant
net_small = MobileNetV3(mode='small')
y = net_small(x)
print('Small output size:', y.size())
    # Print the model summary (requires the optional torchinfo package)
    try:
        from torchinfo import summary
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        net_small = net_small.to(device)
        summary(net_small, (2, 3, 32, 32))
    except ImportError:
        print('torchinfo not installed; skipping model summary')
if __name__ == '__main__':
test()