'''
GoogLeNet in PyTorch.
Paper: "Going Deeper with Convolutions"
Reference: https://arxiv.org/abs/1409.4842

Key features:
1. Inception modules extract features with parallel multi-scale convolutions.
2. 1x1 convolutions reduce channel dimensionality and cut computation.
3. Global average pooling replaces the fully connected layers.
4. The paper's auxiliary classifiers aid training (not included in this implementation).
'''
import torch
import torch.nn as nn
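
# A rough cost sketch (added for illustration; figures are approximate) of why the
# 1x1 "reduction" convolutions in the Inception branches save computation. For a
# 32x32 feature map and the 192 -> 96 -> 128 path used in the a3 module below:
#   direct 3x3 conv, 192 -> 128:  192 * 128 * 3*3 * 32*32 ≈ 226M MACs
#   1x1 reduce, then 3x3 conv:    192 * 96 * 32*32 + 96 * 128 * 3*3 * 32*32 ≈ 132M MACs
# The 1x1 bottleneck roughly halves the multiply-accumulate count here, and the
# savings grow with the larger channel counts deeper in the network.
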
class Inception(nn.Module):
    '''Inception module.

    Args:
        in_planes: number of input channels
        n1x1: output channels of the 1x1 convolution branch
        n3x3red: reduction channels of the 3x3 convolution branch
        n3x3: output channels of the 3x3 convolution branch
        n5x5red: reduction channels of the 5x5 convolution branch
        n5x5: output channels of the 5x5 convolution branch
        pool_planes: output channels of the pooling branch
    '''
    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()
        # 1x1 convolution branch
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_planes, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(True),
        )
        # 1x1 -> 3x3 convolution branch
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_planes, n3x3red, kernel_size=1),
            nn.BatchNorm2d(n3x3red),
            nn.ReLU(True),
            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(True),
        )
        # 1x1 -> 5x5 convolution branch (the 5x5 is replaced by two stacked 3x3s)
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_planes, n5x5red, kernel_size=1),
            nn.BatchNorm2d(n5x5red),
            nn.ReLU(True),
            nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
        )
        # 3x3 max pooling -> 1x1 convolution branch
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(in_planes, pool_planes, kernel_size=1),
            nn.BatchNorm2d(pool_planes),
            nn.ReLU(True),
        )
    def forward(self, x):
        '''Run all four branches and concatenate their outputs along the channel dimension.'''
        b1 = self.branch1(x)
        b2 = self.branch2(x)
        b3 = self.branch3(x)
        b4 = self.branch4(x)
        return torch.cat([b1, b2, b3, b4], 1)
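
# Shape sketch (illustrative, using the a3 configuration from GoogLeNet below):
#   inc = Inception(192, 64, 96, 128, 16, 32, 32)
#   y = inc(torch.randn(1, 192, 32, 32))
# Every branch preserves the 32x32 spatial size, so the concatenation yields
#   y.shape == (1, 64 + 128 + 32 + 32, 32, 32) == (1, 256, 32, 32)
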
class GoogLeNet(nn.Module):
    '''GoogLeNet / Inception v1 network.

    Key features:
    1. Deep network built by stacking Inception modules.
    2. 1x1 convolutions reduce channel dimensionality and cut computation.
    3. Global average pooling replaces the fully connected layers, reducing parameters.
    '''
    def __init__(self, num_classes=10):
        super(GoogLeNet, self).__init__()
        # Stage 1: a plain convolutional stem
        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )
        # Stage 2: two Inception modules
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)      # output channels: 256
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)    # output channels: 480
        # Max pooling layer (reused between stages)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        # Stage 3: five Inception modules
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)     # output channels: 512
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)    # output channels: 512
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)    # output channels: 512
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)    # output channels: 528
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)  # output channels: 832
        # Stage 4: two Inception modules
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)  # output channels: 832
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)  # output channels: 1024
        # Global average pooling and classifier
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, num_classes)
    def forward(self, x):
        # Stage 1
        out = self.pre_layers(x)
        # Stage 2
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)
        # Stage 3
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)
        # Stage 4
        out = self.a5(out)
        out = self.b5(out)
        # Classifier
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
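
# Spatial-size bookkeeping for a CIFAR-style 32x32 input (illustrative):
#   the stem and stage 2 keep 32x32; the first maxpool halves it to 16x16;
#   stage 3 keeps 16x16; the second maxpool halves it to 8x8; stage 4 keeps 8x8.
# AvgPool2d(8) then collapses the 8x8 map to 1x1, so the flattened feature is
# exactly 1024-dimensional, matching nn.Linear(1024, num_classes).
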
def test():
    """Smoke test: run a random input through the network and print the output size."""
    net = GoogLeNet()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y.size())
    # Print the model structure
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (1, 3, 32, 32))


if __name__ == '__main__':
    test()