|
'''GoogLeNet in PyTorch.

Paper: "Going Deeper with Convolutions"
Reference: https://arxiv.org/abs/1409.4842

Key features:
1. Inception modules that extract features at multiple scales in parallel
2. 1x1 convolutions for dimensionality reduction, cutting computation
3. Global average pooling in place of fully connected layers
4. Auxiliary classifiers to aid training (not included in this implementation)

This implementation targets 32x32 inputs (e.g. CIFAR-10): a 3x3 stem replaces
the paper's 7x7/stride-2 stem, and the head pools over an 8x8 feature map.
'''
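
# Worked example (illustrative only, not executed): why the 1x1 reduction in
# point 2 saves computation. For a 28x28 feature map with 192 input channels
# and a 5x5 branch producing 32 channels (the a3-sized module in the paper):
#   direct 5x5 conv:            5*5*192*32 * 28*28 ~= 120.4M multiply-adds
#   1x1 reduce to 16, then 5x5: (1*1*192*16 + 5*5*16*32) * 28*28 ~= 12.4M
# i.e. roughly a 10x saving on that branch. (This file additionally replaces
# the 5x5 conv with two 3x3 convs; see Inception.branch3 below.)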
|
import torch
import torch.nn as nn
|
class Inception(nn.Module):
    '''Inception module: four parallel branches concatenated along channels.

    Args:
        in_planes: number of input channels
        n1x1: output channels of the 1x1 conv branch
        n3x3red: 1x1 reduction channels of the 3x3 conv branch
        n3x3: output channels of the 3x3 conv branch
        n5x5red: 1x1 reduction channels of the 5x5 conv branch
        n5x5: output channels of the 5x5 conv branch
        pool_planes: output channels of the pooling branch

    The output has n1x1 + n3x3 + n5x5 + pool_planes channels.
    '''

    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()

        # Branch 1: 1x1 convolution
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_planes, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(True),
        )

        # Branch 2: 1x1 reduction followed by a 3x3 convolution
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_planes, n3x3red, kernel_size=1),
            nn.BatchNorm2d(n3x3red),
            nn.ReLU(True),
            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(True),
        )

        # Branch 3: 1x1 reduction followed by two stacked 3x3 convolutions.
        # The paper uses a single 5x5 conv here; two 3x3 convs cover the
        # same receptive field with fewer parameters.
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_planes, n5x5red, kernel_size=1),
            nn.BatchNorm2d(n5x5red),
            nn.ReLU(True),
            nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
        )

        # Branch 4: 3x3 max pooling followed by a 1x1 convolution
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(in_planes, pool_planes, kernel_size=1),
            nn.BatchNorm2d(pool_planes),
            nn.ReLU(True),
        )
|
    def forward(self, x):
        '''Run the four branches in parallel and concatenate along channels.'''
        b1 = self.branch1(x)
        b2 = self.branch2(x)
        b3 = self.branch3(x)
        b4 = self.branch4(x)
        return torch.cat([b1, b2, b3, b4], 1)
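
# Quick sanity check (hypothetical usage, not executed on import): the
# concatenated output width is the sum of the four branch widths, and the
# spatial size is preserved because every branch pads to "same" resolution.
# For Inception(192, 64, 96, 128, 16, 32, 32), that is 64+128+32+32 = 256:
#   >>> m = Inception(192, 64, 96, 128, 16, 32, 32)
#   >>> m(torch.randn(1, 192, 32, 32)).shape
#   torch.Size([1, 256, 32, 32])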
|
|
|
|
|
class GoogLeNet(nn.Module):
    '''GoogLeNet / Inception v1, sized here for 32x32 inputs (e.g. CIFAR-10).

    Key points:
    1. A deep network built by stacking Inception modules
    2. 1x1 convolutions for dimensionality reduction to cut computation
    3. Global average pooling instead of fully connected layers to cut parameters
    '''

    def __init__(self, num_classes=10):
        super(GoogLeNet, self).__init__()
|
        # Stem: a single 3x3 conv; the paper's 224x224 stem (7x7, stride 2)
        # is replaced so that 32x32 resolution is preserved
        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )

        # Inception 3a, 3b: each in_planes equals the previous module's
        # summed branch widths (e.g. 64+128+32+32 = 256)
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        # Shared 3x3/stride-2 max pool, halving the spatial size
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        # Inception 4a-4e
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        # Inception 5a, 5b
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        # Global average pooling over the final 8x8 map, then the classifier
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, num_classes)
|
    def forward(self, x):
        # Stem
        out = self.pre_layers(x)

        # Stage 3: 32x32 resolution, then downsample to 16x16
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)

        # Stage 4: 16x16 resolution, then downsample to 8x8
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)

        # Stage 5: 8x8 resolution
        out = self.a5(out)
        out = self.b5(out)

        # Head: global average pool, flatten, classify
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
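
# Shape trace for a 32x32 input (derivable from the layers above):
#   pre_layers -> (N, 192, 32, 32)
#   a3, b3     -> (N, 480, 32, 32);  maxpool -> (N, 480, 16, 16)
#   a4..e4     -> (N, 832, 16, 16);  maxpool -> (N, 832, 8, 8)
#   a5, b5     -> (N, 1024, 8, 8)
#   avgpool(8) -> (N, 1024, 1, 1) -> flatten -> linear -> (N, num_classes)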
|
|
|
def test():
    '''Smoke test: push one random CIFAR-sized input through the network.'''
    net = GoogLeNet()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y.size())  # expected: torch.Size([1, 10])

    # Optional layer-by-layer summary (requires the torchinfo package)
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (1, 3, 32, 32), device=device)
|
|
if __name__ == '__main__':
    test()
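
# Minimal training-loop sketch (illustrative; the hyperparameters and the
# train_loader are assumptions, not taken from the paper or this file):
#
#   net = GoogLeNet(num_classes=10)
#   criterion = nn.CrossEntropyLoss()
#   optimizer = torch.optim.SGD(net.parameters(), lr=0.1,
#                               momentum=0.9, weight_decay=5e-4)
#   for images, labels in train_loader:  # train_loader: hypothetical DataLoader
#       optimizer.zero_grad()
#       loss = criterion(net(images), labels)
#       loss.backward()
#       optimizer.step()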
|
|