'''
MobileNetV2 in PyTorch.

Paper: "MobileNetV2: Inverted Residuals and Linear Bottlenecks"
Reference: https://arxiv.org/abs/1801.04381

Key ideas:
1. Inverted residual blocks: expand channels first, then project back down
   (the opposite of a classic residual bottleneck).
2. Linear bottlenecks: the final 1x1 projection has no ReLU, preserving
   information in the low-dimensional features.
3. ReLU6 as the activation function, improving robustness under
   low-precision computation.
4. Residual connections use addition rather than concatenation,
   reducing memory usage.
'''
import torch
import torch.nn as nn
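# A minimal illustration (not from the original file) of why ReLU6 suits
# low-precision inference: relu6(x) = min(max(x, 0), 6) caps activations at 6,
# so the dynamic range stays bounded even under int8/fixed-point arithmetic.
def _demo_relu6():
    x = torch.tensor([-2.0, 3.0, 8.0])
    print(nn.ReLU6()(x))  # tensor([0., 3., 6.])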
class Block(nn.Module):
    '''Inverted residual block.

    Structure: expand (1x1) -> depthwise (3x3) -> project (1x1)

    Notes:
    1. The 1x1 convolutions first expand and then reduce the channel count
       (the reverse of a ResNet bottleneck).
    2. The 3x3 convolution is depthwise (groups == channels), which sharply
       reduces parameters compared with a standard convolution.
    3. A shortcut is added whenever stride == 1; if the input and output
       channel counts differ, the shortcut is a 1x1 conv + BN projection.

    Args:
        in_channels: number of input channels
        out_channels: number of output channels
        expansion: expansion factor for the hidden (expanded) channels
        stride: stride of the depthwise convolution (controls spatial size)
    '''
def __init__(self, in_channels, out_channels, expansion, stride):
super(Block, self).__init__()
self.stride = stride
        channels = expansion * in_channels  # expanded (hidden) channel count
        # 1x1 pointwise convolution: expand channels
self.conv1 = nn.Conv2d(
in_channels, channels,
kernel_size=1, stride=1, padding=0, bias=False
)
self.bn1 = nn.BatchNorm2d(channels)
        # 3x3 depthwise convolution
self.conv2 = nn.Conv2d(
channels, channels,
kernel_size=3, stride=stride, padding=1,
            groups=channels, bias=False  # groups=channels makes this a depthwise conv
)
self.bn2 = nn.BatchNorm2d(channels)
        # 1x1 pointwise convolution: project down (linear bottleneck, no activation)
self.conv3 = nn.Conv2d(
channels, out_channels,
kernel_size=1, stride=1, padding=0, bias=False
)
self.bn3 = nn.BatchNorm2d(out_channels)
        # shortcut connection (1x1 conv projection when channel counts differ)
self.shortcut = nn.Sequential()
if stride == 1 and in_channels != out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_channels, out_channels,
kernel_size=1, stride=1, padding=0, bias=False
),
nn.BatchNorm2d(out_channels)
)
self.relu6 = nn.ReLU6(inplace=True)
def forward(self, x):
        # main branch
        out = self.relu6(self.bn1(self.conv1(x)))    # expand
        out = self.relu6(self.bn2(self.conv2(out)))  # depthwise conv
        out = self.bn3(self.conv3(out))              # project (linear bottleneck)
        # residual add (only when stride == 1)
out = out + self.shortcut(x) if self.stride == 1 else out
return out
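# Hedged sanity check (illustrative helper, not part of the original model):
# with stride=1 the spatial size is preserved and the residual is added
# (through a 1x1 projection when channel counts differ); with stride=2 the
# feature map is halved and no shortcut is used. It also contrasts the
# parameter counts of a standard vs. depthwise 3x3 convolution.
def _demo_block():
    x = torch.randn(1, 16, 32, 32)
    b1 = Block(16, 24, expansion=6, stride=1)  # projected shortcut (16 != 24)
    b2 = Block(24, 32, expansion=6, stride=2)  # no shortcut (stride != 1)
    print(b1(x).shape)      # torch.Size([1, 24, 32, 32])
    print(b2(b1(x)).shape)  # torch.Size([1, 32, 16, 16])
    # parameter savings from the depthwise 3x3 (groups=channels):
    standard = nn.Conv2d(96, 96, 3, padding=1, bias=False)               # 96*96*9 = 82944
    depthwise = nn.Conv2d(96, 96, 3, padding=1, groups=96, bias=False)   # 96*1*9  = 864
    print(sum(p.numel() for p in standard.parameters()),
          sum(p.numel() for p in depthwise.parameters()))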
class MobileNetV2(nn.Module):
    '''MobileNetV2 network.

    Args:
        num_classes: number of output classes

    Network configuration:
        cfg = [(expansion, out_channels, num_blocks, stride), ...]
        - expansion: expansion factor
        - out_channels: number of output channels
        - num_blocks: number of blocks in the stage
        - stride: stride of the first block in the stage
    '''
    # network architecture configuration
cfg = [
# (expansion, out_channels, num_blocks, stride)
(1, 16, 1, 1), # conv1
        (6, 24, 2, 1),   # conv2; NOTE: the paper uses stride=2, changed to 1 for CIFAR-10
(6, 32, 3, 2), # conv3
(6, 64, 4, 2), # conv4
(6, 96, 3, 1), # conv5
(6, 160, 3, 2), # conv6
(6, 320, 1, 1), # conv7
]
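    # Example: the stage (6, 24, 2, 1) unrolls (in _make_layers below) into
    #   Block(16, 24, expansion=6, stride=1) -> Block(24, 24, expansion=6, stride=1),
    # so only the first block of a stage can change the spatial resolution.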
def __init__(self, num_classes=10):
super(MobileNetV2, self).__init__()
        # stem convolution (NOTE: the paper uses stride=2, changed to 1 for CIFAR-10)
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
        # backbone: stack of inverted residual blocks
self.layers = self._make_layers(in_channels=32)
        # final 1x1 convolution
self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(1280)
        # classifier head
        self.avgpool = nn.AdaptiveAvgPool2d(1)  # global average pooling
self.linear = nn.Linear(1280, num_classes)
self.relu6 = nn.ReLU6(inplace=True)
def _make_layers(self, in_channels):
        '''Build the backbone stages from self.cfg.

        Args:
            in_channels: number of channels entering the first block
        '''
layers = []
for expansion, out_channels, num_blocks, stride in self.cfg:
            # the first block of a stage uses the configured stride; the rest use stride=1
strides = [stride] + [1]*(num_blocks-1)
for stride in strides:
layers.append(
Block(in_channels, out_channels, expansion, stride)
)
in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
        # stem convolution
out = self.relu6(self.bn1(self.conv1(x)))
        # backbone
out = self.layers(out)
        # final 1x1 convolution
out = self.relu6(self.bn2(self.conv2(out)))
        # classifier head
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
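# Shape trace for a CIFAR-10 input (32x32 RGB), following the strides above:
#   input       (N, 3, 32, 32)
#   conv1/stem  (N, 32, 32, 32)   stride 1 here (the paper uses 2 on ImageNet)
#   backbone    (N, 320, 4, 4)    stage strides 1,1,2,2,1,2,1
#   conv2       (N, 1280, 4, 4)
#   avgpool     (N, 1280, 1, 1) -> flatten -> linear -> (N, num_classes)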
def test():
"""测试函数"""
net = MobileNetV2()
x = torch.randn(2, 3, 32, 32)
y = net(x)
    print(y.size())  # expected: torch.Size([2, 10])
    # print the model structure (requires torchinfo)
from torchinfo import summary
device = 'cuda' if torch.cuda.is_available() else 'cpu'
net = net.to(device)
summary(net, (2, 3, 32, 32))
if __name__ == '__main__':
    test()